mirror of https://github.com/apache/druid.git

Fix various bugs; Enable more IntelliJ inspections and update error-prone (#6490)

* Fix various bugs; Enable more IntelliJ inspections and update error-prone
* Fix NPE
* Fix inspections
* Remove unused imports

This commit is contained in:
parent bcb754d066
commit 54351a5c75

@@ -11,28 +11,35 @@
<inspection_tool class="ArrayObjectsEquals" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ArraysAsListWithZeroOrOneArgument" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="AssertWithSideEffects" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CapturingCleaner" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CastConflictsWithInstanceof" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CastToIncompatibleInterface" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CatchMayIgnoreException" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="m_ignoreCatchBlocksWithComments" value="false" />
|
||||
</inspection_tool>
|
||||
<inspection_tool class="CheckValidXmlInScriptTagBody" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="ClassGetClass" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ClassNewInstance" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="CollectionAddedToSelf" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ComparableImplementedButEqualsNotOverridden" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="ComparatorMethodParameterNotUsed" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ComparatorResultComparison" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CompareToUsesNonFinalVariable" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ConstantAssertCondition" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="Contract" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CopyConstructorMissesField" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="CovariantEquals" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EmptyInitializer" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EmptyStatementBody" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="m_reportEmptyBlocks" value="true" />
|
||||
<option name="commentsAreContent" value="true" />
|
||||
</inspection_tool>
|
||||
<inspection_tool class="EndlessStream" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsAndHashcode" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsBetweenInconvertibleTypes" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsOnSuspiciousObject" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsUsesNonFinalVariable" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsWhichDoesntCheckParameterClass" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="EqualsWithItself" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="FieldCanBeLocal" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="EXCLUDE_ANNOS">
|
||||
|
@ -72,6 +79,17 @@
|
|||
<inspection_tool class="MathRandomCastToInt" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="MavenModelInspection" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="MismatchedArrayReadWrite" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="MismatchedCollectionQueryUpdate" enabled="true" level="ERROR" enabled_by_default="true">
|
||||
<option name="queryNames">
|
||||
<value />
|
||||
</option>
|
||||
<option name="updateNames">
|
||||
<value />
|
||||
</option>
|
||||
<option name="ignoredClasses">
|
||||
<value />
|
||||
</option>
|
||||
</inspection_tool>
|
||||
<inspection_tool class="MismatchedStringBuilderQueryUpdate" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="MissingOverrideAnnotation" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<scope name="NonGeneratedFiles" level="ERROR" enabled="true">
|
||||
|
@ -93,6 +111,7 @@
|
|||
</inspection_tool>
|
||||
<inspection_tool class="ObjectEqualsNull" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="ObjectToString" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="OverwrittenKey" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="PrimitiveArrayArgumentToVariableArgMethod" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="RedundantThrows" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="RedundantTypeArguments" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
|
@ -182,22 +201,38 @@
|
|||
<constraint name="E" regexp="java\.lang\.UnsupportedOperationException" within="" contains="" />
|
||||
<constraint name="x" minCount="0" maxCount="2147483647" within="" contains="" />
|
||||
</searchConfiguration>
|
||||
<searchConfiguration name="Use TypeReference<List<...>> instead" created="1539884261626" text="TypeReference<ArrayList<$E$>>" recursive="false" caseInsensitive="true" type="JAVA">
|
||||
<constraint name="__context__" target="true" within="" contains="" />
|
||||
<constraint name="E" within="" contains="" />
|
||||
</searchConfiguration>
|
||||
<searchConfiguration name="Use TypeReference<Map<...>> instead" created="1539884261626" text="TypeReference<HashMap<$K$, $V$>>" recursive="false" caseInsensitive="true" type="JAVA">
|
||||
<constraint name="__context__" target="true" within="" contains="" />
|
||||
<constraint name="K" within="" contains="" />
|
||||
<constraint name="V" within="" contains="" />
|
||||
</searchConfiguration>
|
||||
<searchConfiguration name="Use TypeReference<Set<...>> instead" created="1539884261626" text="TypeReference<HashSet<$E$>>" recursive="false" caseInsensitive="true" type="JAVA">
|
||||
<constraint name="__context__" target="true" within="" contains="" />
|
||||
<constraint name="E" within="" contains="" />
|
||||
</searchConfiguration>
|
||||
</inspection_tool>
|
||||
<inspection_tool class="SpellCheckingInspection" enabled="false" level="TYPO" enabled_by_default="false">
|
||||
<option name="processCode" value="true" />
|
||||
<option name="processLiterals" value="true" />
|
||||
<option name="processComments" value="true" />
|
||||
</inspection_tool>
|
||||
<inspection_tool class="StaticCallOnSubclass" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="StaticFieldReferenceOnSubclass" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="StaticCallOnSubclass" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StaticFieldReferenceOnSubclass" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StringConcatenationInFormatCall" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StringConcatenationInMessageFormatCall" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StringConcatenationMissingWhitespace" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="StringEquality" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StringEqualsCharSequence" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="StringTokenizerDelimiter" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="SubtractionInCompareTo" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="SubtractionInCompareTo" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="SuspiciousArrayCast" enabled="true" level="WARNING" enabled_by_default="true" />
|
||||
<inspection_tool class="SuspiciousArrayMethodCall" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="SuspiciousIndentAfterControlStatement" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="SuspiciousListRemoveInLoop" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="SuspiciousMethodCalls" enabled="true" level="ERROR" enabled_by_default="true">
|
||||
<option name="REPORT_CONVERTIBLE_METHOD_CALLS" value="true" />
|
||||
</inspection_tool>
|
||||
|
@ -226,6 +261,11 @@
|
|||
<option name="ignoreInModuleStatements" value="true" />
|
||||
</inspection_tool>
|
||||
<inspection_tool class="UnnecessaryInterfaceModifier" enabled="true" level="ERROR" enabled_by_default="true" />
|
||||
<inspection_tool class="UnusedAssignment" enabled="true" level="ERROR" enabled_by_default="true">
|
||||
<option name="REPORT_PREFIX_EXPRESSIONS" value="true" />
|
||||
<option name="REPORT_POSTFIX_EXPRESSIONS" value="true" />
|
||||
<option name="REPORT_REDUNDANT_INITIALIZER" value="true" />
|
||||
</inspection_tool>
|
||||
<inspection_tool class="UnusedCatchParameter" enabled="true" level="WARNING" enabled_by_default="true">
|
||||
<option name="m_ignoreCatchBlocksWithComments" value="false" />
|
||||
<option name="m_ignoreTestCases" value="false" />
|
||||
|
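The structural-search templates added to this inspection profile flag TypeReference<ArrayList<...>>, TypeReference<HashMap<...>>, and TypeReference<HashSet<...>>; test clients later in this diff switch to the interface-typed form such as TypeReference<List<String>>. An illustrative sketch (not code from this commit) of why the interface form is preferred with Jackson: the concrete class adds no information and pins callers to one implementation, while Jackson still materializes a suitable concrete collection.

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;

public class TypeReferenceExample
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    // Declare the interface type; Jackson still returns a concrete List (an ArrayList by default).
    List<String> segments = mapper.readValue(
        "[\"2017-01-01\",\"2017-01-02\"]",
        new TypeReference<List<String>>() {}
    );
    System.out.println(segments);
  }
}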
|
|
@ -95,7 +95,6 @@ import java.io.IOException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
@ -167,15 +166,13 @@ public class TimeCompareBenchmark
|
|||
INDEX_MERGER_V9 = new IndexMergerV9(JSON_MAPPER, INDEX_IO, OffHeapMemorySegmentWriteOutMediumFactory.instance());
|
||||
}
|
||||
|
||||
private static final Map<String, Map<String, Object>> SCHEMA_QUERY_MAP = new LinkedHashMap<>();
|
||||
|
||||
private void setupQueries()
|
||||
{
|
||||
// queries for the basic schema
|
||||
Map<String, Object> basicQueries = new LinkedHashMap<>();
|
||||
BenchmarkSchemaInfo basicSchema = BenchmarkSchemas.SCHEMA_MAP.get("basic");
|
||||
|
||||
QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
|
||||
QuerySegmentSpec intervalSpec =
|
||||
new MultipleIntervalSegmentSpec(Collections.singletonList(basicSchema.getDataInterval()));
|
||||
|
||||
long startMillis = basicSchema.getDataInterval().getStartMillis();
|
||||
long endMillis = basicSchema.getDataInterval().getEndMillis();
|
||||
|
@ -204,9 +201,7 @@ public class TimeCompareBenchmark
|
|||
);
|
||||
queryAggs.add(
|
||||
new FilteredAggregatorFactory(
|
||||
new LongSumAggregatorFactory(
|
||||
"_cmp_sumLongSequential", "sumLongSequential"
|
||||
),
|
||||
new LongSumAggregatorFactory("_cmp_sumLongSequential", "sumLongSequential"),
|
||||
new IntervalDimFilter(
|
||||
ColumnHolder.TIME_COLUMN_NAME,
|
||||
Collections.singletonList(previous),
|
||||
|
@ -235,8 +230,6 @@ public class TimeCompareBenchmark
|
|||
new TopNQueryQueryToolChest(new TopNQueryConfig(), QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
|
||||
QueryBenchmarkUtil.NOOP_QUERYWATCHER
|
||||
);
|
||||
|
||||
basicQueries.put("topNTimeCompare", queryBuilderA);
|
||||
}
|
||||
{ // basic.timeseriesTimeCompare
|
||||
List<AggregatorFactory> queryAggs = new ArrayList<>();
|
||||
|
@ -265,24 +258,21 @@ public class TimeCompareBenchmark
|
|||
)
|
||||
);
|
||||
|
||||
Druids.TimeseriesQueryBuilder timeseriesQueryBuilder = Druids.newTimeseriesQueryBuilder()
|
||||
.dataSource("blah")
|
||||
.granularity(Granularities.ALL)
|
||||
.intervals(intervalSpec)
|
||||
.aggregators(queryAggs)
|
||||
.descending(false);
|
||||
Druids.TimeseriesQueryBuilder timeseriesQueryBuilder = Druids
|
||||
.newTimeseriesQueryBuilder()
|
||||
.dataSource("blah")
|
||||
.granularity(Granularities.ALL)
|
||||
.intervals(intervalSpec)
|
||||
.aggregators(queryAggs)
|
||||
.descending(false);
|
||||
|
||||
timeseriesQuery = timeseriesQueryBuilder.build();
|
||||
timeseriesFactory = new TimeseriesQueryRunnerFactory(
|
||||
new TimeseriesQueryQueryToolChest(
|
||||
QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()
|
||||
),
|
||||
new TimeseriesQueryQueryToolChest(QueryBenchmarkUtil.NoopIntervalChunkingQueryRunnerDecorator()),
|
||||
new TimeseriesQueryEngine(),
|
||||
QueryBenchmarkUtil.NOOP_QUERYWATCHER
|
||||
);
|
||||
}
|
||||
|
||||
SCHEMA_QUERY_MAP.put("basic", basicQueries);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@@ -59,26 +59,6 @@ public class Comparators
    return NATURAL_NULLS_FIRST;
  }

  /**
   * This is a "reverse" comparator. Positive becomes negative, negative becomes positive and 0 (equal) stays the same.
   * This was poorly named as "inverse" as it's not really inverting a true/false relationship
   *
   * @param baseComp
   * @param <T>
   * @return
   */
  public static <T> Comparator<T> inverse(final Comparator<T> baseComp)
  {
    return new Comparator<T>()
    {
      @Override
      public int compare(T t, T t1)
      {
        return baseComp.compare(t1, t);
      }
    };
  }

  /**
   * Use Guava Ordering.natural() instead
   *
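The inverse helper removed above predates Java 8; call sites elsewhere in this commit (for example the TreeMap in MaterializedViewSupervisor) switch to the equivalent Comparator.reversed(). A minimal sketch of the equivalence, with illustrative data:

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ReversedComparatorExample
{
  public static void main(String[] args)
  {
    List<Integer> values = Arrays.asList(3, 1, 2);
    // Comparators.inverse(Comparator.naturalOrder()) behaved like this standard reversal:
    Comparator<Integer> reversed = Comparator.<Integer>naturalOrder().reversed();
    values.sort(reversed);
    System.out.println(values); // [3, 2, 1]
  }
}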

@@ -50,7 +50,7 @@ public class Emitters
      Lifecycle lifecycle
  )
  {
    Map<String, Object> jsonified = new HashMap<>();
    Map<String, Object> jsonified;
    if (props.getProperty(LOG_EMITTER_PROP) != null) {
      jsonified = makeLoggingMap(props);
      jsonified.put("type", "logging");

@@ -77,7 +77,7 @@ public class ChannelResourceFactory implements ResourceFactory<String, ChannelFu
  public ChannelFuture generate(final String hostname)
  {
    log.debug("Generating: %s", hostname);
    URL url = null;
    URL url;
    try {
      url = new URL(hostname);
    }
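Several files in this commit drop redundant "= null" initializers like the one above. A small sketch, with hypothetical names, of why the bare declaration is preferable: without the dummy initializer the compiler's definite-assignment check guarantees the variable is set on every path before use, so a forgotten branch becomes a compile error instead of a latent NullPointerException.

import java.net.MalformedURLException;
import java.net.URL;

public class DefiniteAssignmentExample
{
  static URL parse(String spec)
  {
    URL url; // no "= null": the compiler now insists every path assigns it before the return
    try {
      url = new URL(spec);
    }
    catch (MalformedURLException e) {
      throw new IllegalArgumentException("bad url: " + spec, e);
    }
    return url;
  }

  public static void main(String[] args)
  {
    System.out.println(parse("https://github.com/apache/druid"));
  }
}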

@@ -52,6 +52,9 @@ public abstract class ExprEval<T>

  public static ExprEval of(@Nullable String stringValue)
  {
    if (stringValue == null) {
      return StringExprEval.OF_NULL;
    }
    return new StringExprEval(stringValue);
  }

@@ -180,7 +183,11 @@ public abstract class ExprEval<T>
      case DOUBLE:
        return this;
      case LONG:
        return ExprEval.of(value == null ? null : asLong());
        if (value == null) {
          return ExprEval.of(null);
        } else {
          return ExprEval.of(asLong());
        }
      case STRING:
        return ExprEval.of(asString());
    }

@@ -218,7 +225,11 @@ public abstract class ExprEval<T>
  {
    switch (castTo) {
      case DOUBLE:
        return ExprEval.of(value == null ? null : asDouble());
        if (value == null) {
          return ExprEval.of(null);
        } else {
          return ExprEval.of(asDouble());
        }
      case LONG:
        return this;
      case STRING:

@@ -236,6 +247,8 @@ public abstract class ExprEval<T>

  private static class StringExprEval extends ExprEval<String>
  {
    private static final StringExprEval OF_NULL = new StringExprEval(null);

    private Number numericVal;

    private StringExprEval(@Nullable String value)
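ExprEval.of(null) now returns the shared StringExprEval.OF_NULL instance instead of allocating a fresh wrapper on every call. A hedged sketch of the same null-singleton pattern on a hypothetical value class, not the actual Druid class:

public class NullSingletonExample
{
  static final class StringVal
  {
    // One shared instance for the null case avoids an allocation per call.
    private static final StringVal OF_NULL = new StringVal(null);

    private final String value;

    private StringVal(String value)
    {
      this.value = value;
    }

    static StringVal of(String value)
    {
      return value == null ? OF_NULL : new StringVal(value);
    }
  }

  public static void main(String[] args)
  {
    System.out.println(StringVal.of(null) == StringVal.of(null)); // true: same shared instance
  }
}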
|
@ -26,6 +26,7 @@ import org.junit.Test;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
|
@ -170,10 +171,9 @@ public class OrderedMergeIteratorTest
|
|||
@Test(expected = NoSuchElementException.class)
|
||||
public void testNoElementInNext()
|
||||
{
|
||||
final ArrayList<Iterator<Integer>> iterators = new ArrayList<>();
|
||||
OrderedMergeIterator<Integer> iter = new OrderedMergeIterator<Integer>(
|
||||
OrderedMergeIterator<Integer> iter = new OrderedMergeIterator<>(
|
||||
Ordering.natural(),
|
||||
iterators.iterator()
|
||||
Collections.emptyIterator()
|
||||
);
|
||||
iter.next();
|
||||
}
|
||||
|
@ -181,10 +181,9 @@ public class OrderedMergeIteratorTest
|
|||
@Test(expected = UnsupportedOperationException.class)
|
||||
public void testRemove()
|
||||
{
|
||||
final ArrayList<Iterator<Integer>> iterators = new ArrayList<>();
|
||||
OrderedMergeIterator<Integer> iter = new OrderedMergeIterator<Integer>(
|
||||
OrderedMergeIterator<Integer> iter = new OrderedMergeIterator<>(
|
||||
Ordering.natural(),
|
||||
iterators.iterator()
|
||||
Collections.emptyIterator()
|
||||
);
|
||||
iter.remove();
|
||||
}
|
||||
|
|
|
@ -31,34 +31,6 @@ import java.util.Comparator;
|
|||
*/
|
||||
public class ComparatorsTest
|
||||
{
|
||||
@Test
|
||||
public void testInverse()
|
||||
{
|
||||
Comparator<Integer> normal = Comparators.comparable();
|
||||
Comparator<Integer> inverted = Comparators.inverse(normal);
|
||||
|
||||
Assert.assertEquals(-1, normal.compare(0, 1));
|
||||
Assert.assertEquals(1, normal.compare(1, 0));
|
||||
Assert.assertEquals(0, normal.compare(1, 1));
|
||||
Assert.assertEquals(1, inverted.compare(0, 1));
|
||||
Assert.assertEquals(-1, inverted.compare(1, 0));
|
||||
Assert.assertEquals(0, inverted.compare(1, 1));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testInverseOverflow()
|
||||
{
|
||||
Comparator<Integer> invertedSimpleIntegerComparator = Comparators.inverse(new Comparator<Integer>()
|
||||
{
|
||||
@Override
|
||||
public int compare(Integer o1, Integer o2)
|
||||
{
|
||||
return o1 - o2;
|
||||
}
|
||||
});
|
||||
Assert.assertTrue(invertedSimpleIntegerComparator.compare(0, Integer.MIN_VALUE) < 0);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIntervalsByStartThenEnd()
|
||||
{
|
||||
|
|
|
@@ -138,7 +138,7 @@ public class ConcatSequenceTest
          public Integer accumulate(Integer accumulated, Integer in)
          {
            Assert.assertEquals(accumulated, in);
            return ++accumulated;
            return accumulated + 1;
          }
        }
    ).intValue()

@@ -154,7 +154,7 @@ public class ConcatSequenceTest
          public Integer accumulate(Integer accumulated, Integer in)
          {
            Assert.assertEquals(accumulated, in);
            return ++accumulated;
            return accumulated + 1;
          }
        }
    );
|
@@ -134,7 +134,7 @@ public class SequenceTestHelper
          @Override
          public Integer accumulate(Integer accumulated, Integer in)
          {
            return ++accumulated;
            return accumulated + 1;
          }
        }
    );

@@ -156,7 +156,7 @@ public class SequenceTestHelper
          @Override
          public Integer accumulate(Integer accumulated, Integer in)
          {
            return ++accumulated;
            return accumulated + 1;
          }
        }
    );
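The "return ++accumulated;" lines replaced above are the kind of code the newly enabled UnusedAssignment inspection (with REPORT_PREFIX_EXPRESSIONS) is meant to catch: incrementing the parameter writes a value that nothing reads, since only the returned result escapes. "return accumulated + 1;" produces the same value without the dead store. A tiny illustrative sketch with hypothetical names:

public class DeadStoreExample
{
  // The increment mutates a local copy that is never read again; only the return value matters.
  static int nextWithDeadStore(int accumulated)
  {
    return ++accumulated;
  }

  // Equivalent result, no pointless assignment.
  static int next(int accumulated)
  {
    return accumulated + 1;
  }

  public static void main(String[] args)
  {
    System.out.println(nextWithDeadStore(41) == next(41)); // true
  }
}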
|
@ -29,7 +29,7 @@ public final class BitIterator implements IntSet.IntIterator
|
|||
private boolean literalAndZeroFill;
|
||||
private int nextIndex = 0;
|
||||
private int nextOffset = 0;
|
||||
private int next = -1;
|
||||
private int next;
|
||||
|
||||
BitIterator(ImmutableConciseSet immutableConciseSet)
|
||||
{
|
||||
|
|
|
@@ -1272,9 +1272,9 @@ public class ConciseSet extends AbstractIntSet implements Serializable
    final ConciseSet other = convert(o);

    // the word at the end must be the same
    int res = this.last - other.last;
    int res = Integer.compare(this.last, other.last);
    if (res != 0) {
      return res < 0 ? -1 : 1;
      return res;
    }

    // scan words from MSB to LSB

@@ -1295,9 +1295,9 @@ public class ConciseSet extends AbstractIntSet implements Serializable
          return -1;
        }
        // compare two sequences of zeros
        res = getSequenceCount(otherWord) - getSequenceCount(thisWord);
        res = Integer.compare(getSequenceCount(otherWord), getSequenceCount(thisWord));
        if (res != 0) {
          return res < 0 ? -1 : 1;
          return res;
        }
      } else {
        if (isZeroSequence(otherWord)) {

@@ -1305,9 +1305,9 @@ public class ConciseSet extends AbstractIntSet implements Serializable
          return 1;
        }
        // compare two sequences of ones
        res = getSequenceCount(thisWord) - getSequenceCount(otherWord);
        res = Integer.compare(getSequenceCount(thisWord), getSequenceCount(otherWord));
        if (res != 0) {
          return res < 0 ? -1 : 1;
          return res;
        }
      }
      // if the sequences are the same (both zeros or both ones)

@@ -1363,9 +1363,10 @@ public class ConciseSet extends AbstractIntSet implements Serializable
        otherWord--;
      }
    } else {
      res = thisWord - otherWord; // equals getLiteralBits(thisWord) - getLiteralBits(otherWord)
      // equals compare(getLiteralBits(thisWord), getLiteralBits(otherWord))
      res = Integer.compare(thisWord, otherWord);
      if (res != 0) {
        return res < 0 ? -1 : 1;
        return res;
      }
      if (--thisIndex >= 0) {
        thisWord = this.words[thisIndex];
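The compareTo hunks above replace subtraction with Integer.compare, matching the SubtractionInCompareTo inspection this commit raises to ERROR: a - b overflows when the operands are far apart and can then report the wrong order. A small sketch of the failure mode with illustrative values:

public class SubtractionCompareExample
{
  public static void main(String[] args)
  {
    int a = Integer.MIN_VALUE;
    int b = 1;
    // Subtraction overflows and claims a > b:
    System.out.println(a - b);                 // 2147483647 (positive, wrong sign)
    // Integer.compare gives the correct ordering:
    System.out.println(Integer.compare(a, b)); // -1
  }
}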
|
@ -216,7 +216,7 @@ public class ImmutableConciseSet
|
|||
int endIndex = length - 1;
|
||||
|
||||
int wordsWalked = 0;
|
||||
int last = 0;
|
||||
int last;
|
||||
|
||||
WordIterator iter = set.newWordIterator();
|
||||
|
||||
|
|
|
@ -30,11 +30,11 @@ public class AzureBlob
|
|||
{
|
||||
@JsonProperty
|
||||
@NotNull
|
||||
private String container = null;
|
||||
private String container;
|
||||
|
||||
@JsonProperty
|
||||
@NotNull
|
||||
private String path = null;
|
||||
private String path;
|
||||
|
||||
@JsonCreator
|
||||
public AzureBlob(@JsonProperty("container") String container, @JsonProperty("path") String path)
|
||||
|
|
|
@ -62,9 +62,8 @@ public class CassandraDataSegmentPuller extends CassandraStorage
|
|||
final File tmpFile = new File(outDir, "index.zip");
|
||||
log.info("Pulling to temporary local cache [%s]", tmpFile.getAbsolutePath());
|
||||
|
||||
final FileUtils.FileCopyResult localResult;
|
||||
try {
|
||||
localResult = RetryUtils.retry(
|
||||
RetryUtils.retry(
|
||||
() -> {
|
||||
try (OutputStream os = new FileOutputStream(tmpFile)) {
|
||||
ChunkedStorage
|
||||
|
|
|
@ -29,15 +29,15 @@ public class CloudFilesBlob
|
|||
{
|
||||
@JsonProperty
|
||||
@NotNull
|
||||
private String container = null;
|
||||
private String container;
|
||||
|
||||
@JsonProperty
|
||||
@NotNull
|
||||
private String path = null;
|
||||
private String path;
|
||||
|
||||
@JsonProperty
|
||||
@NotNull
|
||||
private String region = null;
|
||||
private String region;
|
||||
|
||||
@JsonCreator
|
||||
public CloudFilesBlob(
|
||||
|
|
|
@ -96,7 +96,7 @@ public class CloudFilesStorageDruidModule implements DruidModule
|
|||
{
|
||||
log.info("Building Cloud Files Api...");
|
||||
|
||||
Iterable<com.google.inject.Module> modules = null;
|
||||
Iterable<com.google.inject.Module> modules;
|
||||
if (config.getUseServiceNet()) {
|
||||
log.info("Configuring Cloud Files Api to use the internal service network...");
|
||||
modules = ImmutableSet.of(new SLF4JLoggingModule(), new InternalUrlModule());
|
||||
|
|
|
@ -197,7 +197,7 @@ public class KafkaSimpleConsumer
|
|||
)
|
||||
);
|
||||
OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
|
||||
OffsetResponse response = null;
|
||||
OffsetResponse response;
|
||||
try {
|
||||
response = consumer.getOffsetsBefore(request);
|
||||
}
|
||||
|
@ -219,7 +219,7 @@ public class KafkaSimpleConsumer
|
|||
|
||||
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException
|
||||
{
|
||||
FetchResponse response = null;
|
||||
FetchResponse response;
|
||||
Broker previousLeader = leaderBroker;
|
||||
while (true) {
|
||||
ensureConsumer(previousLeader);
|
||||
|
|
|
@ -327,7 +327,8 @@ public class MaterializedViewSupervisor implements Supervisor
|
|||
// use max created_date of base segments as the version of derivative segments
|
||||
Map<Interval, String> maxCreatedDate = baseSegmentsSnapshot.lhs;
|
||||
Map<Interval, String> derivativeVersion = derivativeSegmentsSnapshot.lhs;
|
||||
SortedMap<Interval, String> sortedToBuildInterval = new TreeMap<>(Comparators.inverse(Comparators.intervalsByStartThenEnd()));
|
||||
SortedMap<Interval, String> sortedToBuildInterval =
|
||||
new TreeMap<>(Comparators.intervalsByStartThenEnd().reversed());
|
||||
// find the intervals to drop and to build
|
||||
MapDifference<Interval, String> difference = Maps.difference(maxCreatedDate, derivativeVersion);
|
||||
Map<Interval, String> toBuildInterval = new HashMap<>(difference.entriesOnlyOnLeft());
|
||||
|
|
|
@ -54,7 +54,7 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.hive.ql.exec.vector.BytesColumnVector;
|
||||
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
|
||||
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
|
||||
import org.apache.hadoop.mapreduce.JobContext;
|
||||
import org.apache.hadoop.mapreduce.MRJobConfig;
|
||||
import org.apache.orc.CompressionKind;
|
||||
import org.apache.orc.OrcFile;
|
||||
import org.apache.orc.TypeDescription;
|
||||
|
@ -225,7 +225,7 @@ public class OrcIndexGeneratorJobTest
|
|||
false,
|
||||
false,
|
||||
false,
|
||||
ImmutableMap.of(JobContext.NUM_REDUCES, "0"), //verifies that set num reducers is ignored
|
||||
ImmutableMap.of(MRJobConfig.NUM_REDUCES, "0"), //verifies that set num reducers is ignored
|
||||
false,
|
||||
true,
|
||||
null,
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.druid.query.aggregation.datasketches.quantiles;
|
||||
|
||||
import com.yahoo.sketches.quantiles.DoublesSketch;
|
||||
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
|
||||
import org.apache.druid.query.aggregation.Aggregator;
|
||||
import org.apache.druid.segment.ColumnValueSelector;
|
||||
|
@ -35,7 +36,7 @@ public class DoublesSketchBuildAggregator implements Aggregator
|
|||
{
|
||||
this.valueSelector = valueSelector;
|
||||
this.size = size;
|
||||
sketch = UpdateDoublesSketch.builder().setK(size).build();
|
||||
sketch = DoublesSketch.builder().setK(size).build();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.apache.druid.query.aggregation.datasketches.quantiles;
|
||||
|
||||
import com.yahoo.memory.WritableMemory;
|
||||
import com.yahoo.sketches.quantiles.DoublesSketch;
|
||||
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
|
||||
import it.unimi.dsi.fastutil.ints.Int2ObjectMap;
|
||||
import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
|
||||
|
@ -54,7 +55,7 @@ public class DoublesSketchBuildBufferAggregator implements BufferAggregator
|
|||
{
|
||||
final WritableMemory mem = getMemory(buffer);
|
||||
final WritableMemory region = mem.writableRegion(position, maxIntermediateSize);
|
||||
final UpdateDoublesSketch sketch = UpdateDoublesSketch.builder().setK(size).build(region);
|
||||
final UpdateDoublesSketch sketch = DoublesSketch.builder().setK(size).build(region);
|
||||
putSketch(buffer, position, sketch);
|
||||
}
|
||||
|
||||
|
|
|
@ -90,6 +90,7 @@ public class SketchHolder
|
|||
|
||||
private static final Comparator<Memory> MEMORY_COMPARATOR = new Comparator<Memory>()
|
||||
{
|
||||
@SuppressWarnings("SubtractionInCompareTo")
|
||||
@Override
|
||||
public int compare(Memory o1, Memory o2)
|
||||
{
|
||||
|
|
|
@ -74,7 +74,7 @@ public class ArrayOfDoublesSketchToQuantilesSketchPostAggregator extends ArrayOf
|
|||
public DoublesSketch compute(final Map<String, Object> combinedAggregators)
|
||||
{
|
||||
final ArrayOfDoublesSketch sketch = (ArrayOfDoublesSketch) getField().compute(combinedAggregators);
|
||||
final UpdateDoublesSketch qs = UpdateDoublesSketch.builder().setK(k).build();
|
||||
final UpdateDoublesSketch qs = DoublesSketch.builder().setK(k).build();
|
||||
final ArrayOfDoublesSketchIterator it = sketch.iterator();
|
||||
while (it.next()) {
|
||||
qs.update(it.getValues()[column - 1]); // convert 1-based column number to zero-based index
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.druid.query.aggregation.datasketches.quantiles;
|
||||
|
||||
import com.yahoo.sketches.quantiles.DoublesSketch;
|
||||
import com.yahoo.sketches.quantiles.UpdateDoublesSketch;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
|
||||
|
@ -44,7 +45,7 @@ public class GenerateTestData
|
|||
int sequenceNumber = 0;
|
||||
for (int i = 0; i < 20; i++) {
|
||||
int product = rand.nextInt(10);
|
||||
UpdateDoublesSketch sketch = UpdateDoublesSketch.builder().build();
|
||||
UpdateDoublesSketch sketch = DoublesSketch.builder().build();
|
||||
for (int j = 0; j < 20; j++) {
|
||||
double value = rand.nextDouble();
|
||||
buildData.write("2016010101");
|
||||
|
|
|
@ -143,15 +143,17 @@ public class DruidKerberosAuthenticationHandler extends KerberosAuthenticationHa
|
|||
public AuthenticationToken authenticate(HttpServletRequest request, final HttpServletResponse response)
|
||||
throws IOException, AuthenticationException
|
||||
{
|
||||
AuthenticationToken token = null;
|
||||
String authorization = request.getHeader(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.AUTHORIZATION);
|
||||
AuthenticationToken token;
|
||||
String authorization = request
|
||||
.getHeader(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.AUTHORIZATION);
|
||||
|
||||
if (authorization == null
|
||||
|| !authorization.startsWith(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE)) {
|
||||
if (authorization == null ||
|
||||
!authorization.startsWith(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE)) {
|
||||
return null;
|
||||
} else {
|
||||
authorization = authorization.substring(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE
|
||||
.length()).trim();
|
||||
authorization = authorization
|
||||
.substring(org.apache.hadoop.security.authentication.client.KerberosAuthenticator.NEGOTIATE.length())
|
||||
.trim();
|
||||
final Base64 base64 = new Base64(0);
|
||||
final byte[] clientToken = base64.decode(authorization);
|
||||
final String serverName = request.getServerName();
|
||||
|
|
|
@ -248,7 +248,7 @@ public class KerberosAuthenticator implements Authenticator
|
|||
if (isExcluded(path)) {
|
||||
filterChain.doFilter(request, response);
|
||||
} else {
|
||||
String clientPrincipal = null;
|
||||
String clientPrincipal;
|
||||
try {
|
||||
Cookie[] cookies = httpReq.getCookies();
|
||||
if (cookies == null) {
|
||||
|
|
|
@ -73,6 +73,15 @@ import java.util.HashMap;
|
|||
*/
|
||||
public class HdfsDataSegmentPusherTest
|
||||
{
|
||||
static TestObjectMapper objectMapper;
|
||||
|
||||
static {
|
||||
objectMapper = new TestObjectMapper();
|
||||
InjectableValues.Std injectableValues = new InjectableValues.Std();
|
||||
injectableValues.addValue(ObjectMapper.class, objectMapper);
|
||||
injectableValues.addValue(DataSegment.PruneLoadSpecHolder.class, DataSegment.PruneLoadSpecHolder.DEFAULT);
|
||||
objectMapper.setInjectableValues(injectableValues);
|
||||
}
|
||||
|
||||
@Rule
|
||||
public final TemporaryFolder tempFolder = new TemporaryFolder();
|
||||
|
@ -80,23 +89,15 @@ public class HdfsDataSegmentPusherTest
|
|||
@Rule
|
||||
public final ExpectedException expectedException = ExpectedException.none();
|
||||
|
||||
static TestObjectMapper objectMapper = new TestObjectMapper();
|
||||
|
||||
private HdfsDataSegmentPusher hdfsDataSegmentPusher;
|
||||
|
||||
@Before
|
||||
public void setUp() throws IOException
|
||||
public void setUp()
|
||||
{
|
||||
HdfsDataSegmentPusherConfig hdfsDataSegmentPusherConf = new HdfsDataSegmentPusherConfig();
|
||||
hdfsDataSegmentPusherConf.setStorageDirectory("path/to/");
|
||||
hdfsDataSegmentPusher = new HdfsDataSegmentPusher(hdfsDataSegmentPusherConf, new Configuration(true), objectMapper);
|
||||
}
|
||||
static {
|
||||
objectMapper = new TestObjectMapper();
|
||||
InjectableValues.Std injectableValues = new InjectableValues.Std();
|
||||
injectableValues.addValue(ObjectMapper.class, objectMapper);
|
||||
injectableValues.addValue(DataSegment.PruneLoadSpecHolder.class, DataSegment.PruneLoadSpecHolder.DEFAULT);
|
||||
objectMapper.setInjectableValues(injectableValues);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPushWithScheme() throws Exception
|
||||
|
|
|
@ -363,7 +363,6 @@ public class ApproximateHistogram
|
|||
mergeValue = true;
|
||||
}
|
||||
if (deltaLeft < minDelta) {
|
||||
minDelta = deltaLeft;
|
||||
minPos = insertAt - 1;
|
||||
mergeValue = true;
|
||||
}
|
||||
|
@ -1563,7 +1562,7 @@ public class ApproximateHistogram
|
|||
int i = 0;
|
||||
int sum = 0;
|
||||
int k = 1;
|
||||
long count = 0;
|
||||
long count;
|
||||
while (k <= this.binCount()) {
|
||||
count = bins[k - 1];
|
||||
if (sum + count > s) {
|
||||
|
@ -1583,7 +1582,7 @@ public class ApproximateHistogram
|
|||
final double c = -2 * d;
|
||||
final long a = bins[i] - bins[i - 1];
|
||||
final long b = 2 * bins[i - 1];
|
||||
double z = 0;
|
||||
double z;
|
||||
if (a == 0) {
|
||||
z = -c / b;
|
||||
} else {
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.apache.druid.query.aggregation.histogram;
|
|||
import com.google.common.collect.Iterators;
|
||||
import org.apache.druid.java.util.common.StringUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Ignore;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
|
@ -221,6 +222,7 @@ public class ApproximateHistogramTest
|
|||
}
|
||||
|
||||
//@Test
|
||||
@Ignore
|
||||
@SuppressWarnings("unused") //TODO rewrite using JMH and move to the benchmarks module
|
||||
public void testFoldSpeed()
|
||||
{
|
||||
|
|
|
@ -34,7 +34,6 @@ import com.google.common.collect.ImmutableList;
|
|||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.primitives.Longs;
|
||||
import com.google.common.util.concurrent.Futures;
|
||||
import com.google.common.util.concurrent.ListenableFuture;
|
||||
|
@ -43,6 +42,7 @@ import com.google.common.util.concurrent.MoreExecutors;
|
|||
import org.apache.commons.codec.digest.DigestUtils;
|
||||
import org.apache.druid.indexer.TaskLocation;
|
||||
import org.apache.druid.indexer.TaskStatus;
|
||||
import org.apache.druid.indexing.common.IndexTaskClient;
|
||||
import org.apache.druid.indexing.common.TaskInfoProvider;
|
||||
import org.apache.druid.indexing.common.stats.RowIngestionMetersFactory;
|
||||
import org.apache.druid.indexing.common.task.RealtimeIndexTask;
|
||||
|
@ -129,7 +129,6 @@ public class KafkaSupervisor implements Supervisor
|
|||
private static final long INITIAL_GET_OFFSET_DELAY_MILLIS = 15000;
|
||||
private static final long INITIAL_EMIT_LAG_METRIC_DELAY_MILLIS = 25000;
|
||||
private static final int MAX_INITIALIZATION_RETRIES = 20;
|
||||
private static final CopyOnWriteArrayList EMPTY_LIST = Lists.newCopyOnWriteArrayList();
|
||||
|
||||
public static final String IS_INCREMENTAL_HANDOFF_SUPPORTED = "IS_INCREMENTAL_HANDOFF_SUPPORTED";
|
||||
|
||||
|
@ -337,7 +336,7 @@ public class KafkaSupervisor implements Supervisor
|
|||
this.futureTimeoutInSeconds = Math.max(
|
||||
MINIMUM_FUTURE_TIMEOUT_IN_SECONDS,
|
||||
tuningConfig.getChatRetries() * (tuningConfig.getHttpTimeout().getStandardSeconds()
|
||||
+ KafkaIndexTaskClient.MAX_RETRY_WAIT_SECONDS)
|
||||
+ IndexTaskClient.MAX_RETRY_WAIT_SECONDS)
|
||||
);
|
||||
|
||||
int chatThreads = (this.tuningConfig.getChatThreads() != null
|
||||
|
@ -468,6 +467,12 @@ public class KafkaSupervisor implements Supervisor
|
|||
}
|
||||
}
|
||||
|
||||
private boolean someTaskGroupsPendingCompletion(Integer groupId)
|
||||
{
|
||||
CopyOnWriteArrayList<TaskGroup> taskGroups = pendingCompletionTaskGroups.get(groupId);
|
||||
return taskGroups != null && taskGroups.size() > 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SupervisorReport getStatus()
|
||||
{
|
||||
|
@ -1341,7 +1346,7 @@ public class KafkaSupervisor implements Supervisor
|
|||
partitionOffset.getValue() :
|
||||
latestOffsetsFromDb.getOrDefault(partitionOffset.getKey(), partitionOffset.getValue())
|
||||
) == 0) && earliestConsistentSequenceId.compareAndSet(-1, sequenceCheckpoint.getKey())) || (
|
||||
pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() > 0
|
||||
someTaskGroupsPendingCompletion(groupId)
|
||||
&& earliestConsistentSequenceId.compareAndSet(-1, taskCheckpoints.firstKey()))) {
|
||||
final SortedMap<Integer, Map<Integer, Long>> latestCheckpoints = new TreeMap<>(
|
||||
taskCheckpoints.tailMap(earliestConsistentSequenceId.get())
|
||||
|
@ -1378,7 +1383,7 @@ public class KafkaSupervisor implements Supervisor
|
|||
}
|
||||
|
||||
if ((tasksToKill.size() > 0 && tasksToKill.size() == taskGroup.tasks.size()) ||
|
||||
(taskGroup.tasks.size() == 0 && pendingCompletionTaskGroups.getOrDefault(groupId, EMPTY_LIST).size() == 0)) {
|
||||
(taskGroup.tasks.size() == 0 && !someTaskGroupsPendingCompletion(groupId))) {
|
||||
// killing all tasks or no task left in the group ?
|
||||
// clear state about the taskgroup so that get latest offset information is fetched from metadata store
|
||||
log.warn("Clearing task group [%d] information as no valid tasks left the group", groupId);
|
||||
|
|
|
@ -29,7 +29,7 @@ import org.apache.kafka.clients.consumer.KafkaConsumer;
|
|||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
|
||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.apache.kafka.common.utils.SystemTime;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import scala.Some;
|
||||
import scala.collection.immutable.List$;
|
||||
|
||||
|
@ -81,7 +81,7 @@ public class TestBroker implements Closeable
|
|||
|
||||
final KafkaConfig config = new KafkaConfig(props);
|
||||
|
||||
server = new KafkaServer(config, SystemTime.SYSTEM, Some.apply(StringUtils.format("TestingBroker[%d]-", id)), List$.MODULE$.empty());
|
||||
server = new KafkaServer(config, Time.SYSTEM, Some.apply(StringUtils.format("TestingBroker[%d]-", id)), List$.MODULE$.empty());
|
||||
server.startup();
|
||||
}
|
||||
|
||||
|
|
|
@ -311,7 +311,7 @@ public class CacheSchedulerTest
|
|||
testDelete();
|
||||
}
|
||||
|
||||
public void testDelete() throws InterruptedException
|
||||
private void testDelete() throws InterruptedException
|
||||
{
|
||||
final long period = 1_000L; // Give it some time between attempts to update
|
||||
final UriExtractionNamespace namespace = getUriExtractionNamespace(period);
|
||||
|
|
|
@ -519,7 +519,7 @@ public class JdbcExtractionNamespaceTest
|
|||
private void waitForUpdates(long timeout, long numUpdates) throws InterruptedException
|
||||
{
|
||||
long startTime = System.currentTimeMillis();
|
||||
long pre = 0L;
|
||||
long pre;
|
||||
updateLock.lockInterruptibly();
|
||||
try {
|
||||
pre = updates.get();
|
||||
|
@ -527,7 +527,7 @@ public class JdbcExtractionNamespaceTest
|
|||
finally {
|
||||
updateLock.unlock();
|
||||
}
|
||||
long post = 0L;
|
||||
long post;
|
||||
do {
|
||||
// Sleep to spare a few cpu cycles
|
||||
Thread.sleep(5);
|
||||
|
|
|
@ -111,7 +111,7 @@ public class ProtobufInputRowParser implements ByteBufferInputRowParser
|
|||
|
||||
fin = this.getClass().getClassLoader().getResourceAsStream(descriptorFilePath);
|
||||
if (fin == null) {
|
||||
URL url = null;
|
||||
URL url;
|
||||
try {
|
||||
url = new URL(descriptorFilePath);
|
||||
}
|
||||
|
@ -126,7 +126,7 @@ public class ProtobufInputRowParser implements ByteBufferInputRowParser
|
|||
}
|
||||
}
|
||||
|
||||
DynamicSchema dynamicSchema = null;
|
||||
DynamicSchema dynamicSchema;
|
||||
try {
|
||||
dynamicSchema = DynamicSchema.parseFrom(fin);
|
||||
}
|
||||
|
|
|
@ -125,12 +125,12 @@ public class VarianceGroupByQueryTest
|
|||
.setDataSource(QueryRunnerTestHelper.dataSource)
|
||||
.setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
|
||||
.setDimensions(new DefaultDimensionSpec("quality", "alias"))
|
||||
.setAggregatorSpecs(VarianceTestHelper.rowsCount,
|
||||
VarianceTestHelper.indexVarianceAggr,
|
||||
new LongSumAggregatorFactory("idx", "index"))
|
||||
.setPostAggregatorSpecs(
|
||||
Collections.singletonList(VarianceTestHelper.stddevOfIndexPostAggr)
|
||||
.setAggregatorSpecs(
|
||||
QueryRunnerTestHelper.rowsCount,
|
||||
VarianceTestHelper.indexVarianceAggr,
|
||||
new LongSumAggregatorFactory("idx", "index")
|
||||
)
|
||||
.setPostAggregatorSpecs(Collections.singletonList(VarianceTestHelper.stddevOfIndexPostAggr))
|
||||
.setGranularity(QueryRunnerTestHelper.dayGran)
|
||||
.build();
|
||||
|
||||
|
@ -178,12 +178,14 @@ public class VarianceGroupByQueryTest
|
|||
|
||||
GroupByQuery query = GroupByQuery
|
||||
.builder()
|
||||
.setDataSource(VarianceTestHelper.dataSource)
|
||||
.setDataSource(QueryRunnerTestHelper.dataSource)
|
||||
.setInterval("2011-04-02/2011-04-04")
|
||||
.setDimensions(new DefaultDimensionSpec("quality", "alias"))
|
||||
.setAggregatorSpecs(VarianceTestHelper.rowsCount,
|
||||
VarianceTestHelper.indexLongSum,
|
||||
VarianceTestHelper.indexVarianceAggr)
|
||||
.setAggregatorSpecs(
|
||||
QueryRunnerTestHelper.rowsCount,
|
||||
QueryRunnerTestHelper.indexLongSum,
|
||||
VarianceTestHelper.indexVarianceAggr
|
||||
)
|
||||
.setPostAggregatorSpecs(ImmutableList.of(VarianceTestHelper.stddevOfIndexPostAggr))
|
||||
.setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
|
||||
.setHavingSpec(
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.druid.java.util.common.DateTimes;
|
|||
import org.apache.druid.query.Druids;
|
||||
import org.apache.druid.query.QueryPlus;
|
||||
import org.apache.druid.query.QueryRunner;
|
||||
import org.apache.druid.query.QueryRunnerTestHelper;
|
||||
import org.apache.druid.query.Result;
|
||||
import org.apache.druid.query.aggregation.AggregatorFactory;
|
||||
import org.apache.druid.query.timeseries.TimeseriesQuery;
|
||||
|
@ -59,13 +60,13 @@ public class VarianceTimeseriesQueryTest
|
|||
public void testTimeseriesWithNullFilterOnNonExistentDimension()
|
||||
{
|
||||
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
|
||||
.dataSource(VarianceTestHelper.dataSource)
|
||||
.granularity(VarianceTestHelper.dayGran)
|
||||
.dataSource(QueryRunnerTestHelper.dataSource)
|
||||
.granularity(QueryRunnerTestHelper.dayGran)
|
||||
.filters("bobby", null)
|
||||
.intervals(VarianceTestHelper.firstToThird)
|
||||
.intervals(QueryRunnerTestHelper.firstToThird)
|
||||
.aggregators(VarianceTestHelper.commonPlusVarAggregators)
|
||||
.postAggregators(
|
||||
VarianceTestHelper.addRowsIndexConstant,
|
||||
QueryRunnerTestHelper.addRowsIndexConstant,
|
||||
VarianceTestHelper.stddevOfIndexPostAggr
|
||||
)
|
||||
.descending(descending)
|
||||
|
@ -75,11 +76,11 @@ public class VarianceTimeseriesQueryTest
|
|||
new Result<>(
|
||||
DateTimes.of("2011-04-01"),
|
||||
new TimeseriesResultValue(
|
||||
VarianceTestHelper.of(
|
||||
QueryRunnerTestHelper.of(
|
||||
"rows", 13L,
|
||||
"index", 6626.151596069336,
|
||||
"addRowsIndexConstant", 6640.151596069336,
|
||||
"uniques", VarianceTestHelper.UNIQUES_9,
|
||||
"uniques", QueryRunnerTestHelper.UNIQUES_9,
|
||||
"index_var", descending ? 368885.6897238851 : 368885.689155086,
|
||||
"index_stddev", descending ? 607.3596049490657 : 607.35960448081
|
||||
)
|
||||
|
@ -88,11 +89,11 @@ public class VarianceTimeseriesQueryTest
|
|||
new Result<>(
|
||||
DateTimes.of("2011-04-02"),
|
||||
new TimeseriesResultValue(
|
||||
VarianceTestHelper.of(
|
||||
QueryRunnerTestHelper.of(
|
||||
"rows", 13L,
|
||||
"index", 5833.2095947265625,
|
||||
"addRowsIndexConstant", 5847.2095947265625,
|
||||
"uniques", VarianceTestHelper.UNIQUES_9,
|
||||
"uniques", QueryRunnerTestHelper.UNIQUES_9,
|
||||
"index_var", descending ? 259061.6037088883 : 259061.60216419376,
|
||||
"index_stddev", descending ? 508.9809463122252 : 508.98094479478675
|
||||
)
|
||||
|
|
|
@ -358,7 +358,7 @@ public abstract class HyperLogLogCollector implements Comparable<HyperLogLogColl
|
|||
short numNonZeroRegisters = addNibbleRegister(bucket, (byte) ((0xff & positionOf1) - registerOffset));
|
||||
setNumNonZeroRegisters(numNonZeroRegisters);
|
||||
if (numNonZeroRegisters == NUM_BUCKETS) {
|
||||
setRegisterOffset(++registerOffset);
|
||||
setRegisterOffset((byte) (registerOffset + 1));
|
||||
setNumNonZeroRegisters(decrementBuckets());
|
||||
}
|
||||
}
|
||||
|
@ -421,7 +421,7 @@ public abstract class HyperLogLogCollector implements Comparable<HyperLogLogColl
|
|||
}
|
||||
if (numNonZero == NUM_BUCKETS) {
|
||||
numNonZero = decrementBuckets();
|
||||
setRegisterOffset(++myOffset);
|
||||
setRegisterOffset((byte) (myOffset + 1));
|
||||
setNumNonZeroRegisters(numNonZero);
|
||||
}
|
||||
} else { // dense
|
||||
|
@ -437,7 +437,7 @@ public abstract class HyperLogLogCollector implements Comparable<HyperLogLogColl
|
|||
}
|
||||
if (numNonZero == NUM_BUCKETS) {
|
||||
numNonZero = decrementBuckets();
|
||||
setRegisterOffset(++myOffset);
|
||||
setRegisterOffset((byte) (myOffset + 1));
|
||||
setNumNonZeroRegisters(numNonZero);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -41,6 +41,7 @@ public class HyperLogLogCollectorBenchmark extends SimpleBenchmark
|
|||
{
|
||||
private final HashFunction fn = Hashing.murmur3_128();
|
||||
|
||||
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection") // TODO understand if this field should be used or not
|
||||
private final List<HyperLogLogCollector> collectors = new ArrayList<>();
|
||||
|
||||
@Param({"true"}) boolean targetIsDirect;
|
||||
|
|
|
@ -54,7 +54,7 @@ import org.apache.hadoop.io.BytesWritable;
|
|||
import org.apache.hadoop.io.SequenceFile;
|
||||
import org.apache.hadoop.io.SequenceFile.Writer;
|
||||
import org.apache.hadoop.io.compress.CompressionCodec;
|
||||
import org.apache.hadoop.mapreduce.JobContext;
|
||||
import org.apache.hadoop.mapreduce.MRJobConfig;
|
||||
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeComparator;
|
||||
|
@ -524,7 +524,7 @@ public class IndexGeneratorJobTest
|
|||
false,
|
||||
false,
|
||||
false,
|
||||
ImmutableMap.of(JobContext.NUM_REDUCES, "0"), //verifies that set num reducers is ignored
|
||||
ImmutableMap.of(MRJobConfig.NUM_REDUCES, "0"), //verifies that set num reducers is ignored
|
||||
false,
|
||||
useCombiner,
|
||||
null,
|
||||
|
|
|
@ -280,7 +280,7 @@ public class HadoopIndexTask extends HadoopTask implements ChatHandler
|
|||
toolbox.getSegmentPusher().getPathForHadoop()
|
||||
};
|
||||
|
||||
HadoopIngestionSpec indexerSchema = null;
|
||||
HadoopIngestionSpec indexerSchema;
|
||||
final ClassLoader oldLoader = Thread.currentThread().getContextClassLoader();
|
||||
Class<?> determinePartitionsRunnerClass = determinePartitionsInnerProcessingRunner.getClass();
|
||||
Method determinePartitionsInnerProcessingRunTask = determinePartitionsRunnerClass.getMethod(
|
||||
|
|
|
@ -963,7 +963,7 @@ public class HttpRemoteTaskRunner implements WorkerTaskRunner, TaskLogStreamer
|
|||
pendingTasksExec.execute(
|
||||
() -> {
|
||||
while (!Thread.interrupted() && lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS)) {
|
||||
ImmutableWorkerInfo immutableWorker = null;
|
||||
ImmutableWorkerInfo immutableWorker;
|
||||
HttpRemoteTaskRunnerWorkItem taskItem = null;
|
||||
try {
|
||||
synchronized (statusLock) {
|
||||
|
|
|
@ -289,7 +289,7 @@ public class SupervisorManager
|
|||
metadataSupervisorManager.insert(id, spec);
|
||||
}
|
||||
|
||||
Supervisor supervisor = null;
|
||||
Supervisor supervisor;
|
||||
try {
|
||||
supervisor = spec.createSupervisor();
|
||||
supervisor.start();
|
||||
|
|
|
@ -576,7 +576,7 @@ public abstract class WorkerTaskManager
|
|||
@Override
|
||||
public void handle()
|
||||
{
|
||||
TaskAnnouncement announcement = null;
|
||||
TaskAnnouncement announcement;
|
||||
synchronized (lock) {
|
||||
if (runningTasks.containsKey(task.getId()) || completedTasks.containsKey(task.getId())) {
|
||||
log.warn(
|
||||
|
|
|
@ -174,7 +174,6 @@ public class IngestSegmentFirehoseFactoryTest
|
|||
final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null)
|
||||
{
|
||||
private final Set<DataSegment> published = new HashSet<>();
|
||||
private final Set<DataSegment> nuked = new HashSet<>();
|
||||
|
||||
@Override
|
||||
public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval)
|
||||
|
@ -210,7 +209,7 @@ public class IngestSegmentFirehoseFactoryTest
|
|||
@Override
|
||||
public void deleteSegments(Set<DataSegment> segments)
|
||||
{
|
||||
nuked.addAll(segments);
|
||||
// do nothing
|
||||
}
|
||||
};
|
||||
final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(
|
||||
|
@ -596,7 +595,6 @@ public class IngestSegmentFirehoseFactoryTest
|
|||
final int numSegmentsPerPartitionChunk = 5;
|
||||
final int numPartitionChunksPerTimelineObject = 10;
|
||||
final int numSegments = numSegmentsPerPartitionChunk * numPartitionChunksPerTimelineObject;
|
||||
final List<DataSegment> segments = new ArrayList<>(numSegments);
|
||||
final Interval interval = Intervals.of("2017-01-01/2017-01-02");
|
||||
final String version = "1";
|
||||
|
||||
|
@ -621,7 +619,6 @@ public class IngestSegmentFirehoseFactoryTest
|
|||
1,
|
||||
1
|
||||
);
|
||||
segments.add(segment);
|
||||
|
||||
final PartitionChunk<DataSegment> partitionChunk = new NumberedPartitionChunk<>(
|
||||
i,
|
||||
|
|
|
@ -19,20 +19,15 @@
|
|||
|
||||
package org.apache.druid.indexing.test;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
import org.apache.druid.segment.loading.DataSegmentKiller;
|
||||
import org.apache.druid.timeline.DataSegment;
|
||||
|
||||
import java.util.Set;
|
||||
|
||||
public class TestDataSegmentKiller implements DataSegmentKiller
|
||||
{
|
||||
private final Set<DataSegment> killedSegments = Sets.newConcurrentHashSet();
|
||||
|
||||
@Override
|
||||
public void kill(DataSegment segment)
|
||||
{
|
||||
killedSegments.add(segment);
|
||||
// do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -19,19 +19,15 @@
|
|||
|
||||
package org.apache.druid.indexing.test;
|
||||
|
||||
import com.google.common.collect.Sets;
|
||||
import org.apache.druid.segment.loading.DataSegmentPusher;
|
||||
import org.apache.druid.timeline.DataSegment;
|
||||
|
||||
import java.io.File;
|
||||
import java.net.URI;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
public class TestDataSegmentPusher implements DataSegmentPusher
|
||||
{
|
||||
private final Set<DataSegment> pushedSegments = Sets.newConcurrentHashSet();
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public String getPathForHadoop(String dataSource)
|
||||
|
@ -48,7 +44,6 @@ public class TestDataSegmentPusher implements DataSegmentPusher
|
|||
@Override
|
||||
public DataSegment push(File file, DataSegment segment, boolean useUniquePath)
|
||||
{
|
||||
pushedSegments.add(segment);
|
||||
return segment;
|
||||
}
|
||||
|
||||
|
|
|
@ -88,12 +88,12 @@ public class CoordinatorResourceTestClient
|
|||
// return a list of the segment dates for the specified datasource
|
||||
public List<String> getMetadataSegments(final String dataSource)
|
||||
{
|
||||
ArrayList<String> segments = null;
|
||||
ArrayList<String> segments;
|
||||
try {
|
||||
StatusResponseHolder response = makeRequest(HttpMethod.GET, getMetadataSegmentsURL(dataSource));
|
||||
|
||||
segments = jsonMapper.readValue(
|
||||
response.getContent(), new TypeReference<ArrayList<String>>()
|
||||
response.getContent(), new TypeReference<List<String>>()
|
||||
{
|
||||
}
|
||||
);
|
||||
|
@ -107,12 +107,12 @@ public class CoordinatorResourceTestClient
|
|||
// return a list of the segment dates for the specified datasource
|
||||
public List<String> getSegmentIntervals(final String dataSource)
|
||||
{
|
||||
ArrayList<String> segments = null;
|
||||
ArrayList<String> segments;
|
||||
try {
|
||||
StatusResponseHolder response = makeRequest(HttpMethod.GET, getIntervalsURL(dataSource));
|
||||
|
||||
segments = jsonMapper.readValue(
|
||||
response.getContent(), new TypeReference<ArrayList<String>>()
|
||||
response.getContent(), new TypeReference<List<String>>()
{
}
);

@@ -125,7 +125,7 @@ public class CoordinatorResourceTestClient
private Map<String, Integer> getLoadStatus()
{
Map<String, Integer> status = null;
Map<String, Integer> status;
try {
StatusResponseHolder response = makeRequest(HttpMethod.GET, getLoadStatusURL());

@@ -60,7 +60,7 @@ public class ITHadoopIndexTest extends AbstractIndexerTest
private void loadData(String hadoopDir)
{
String indexerSpec = "";
String indexerSpec;
try {
LOG.info("indexerFile name: [%s]", BATCH_TASK);

@@ -100,7 +100,7 @@ public abstract class AbstractITRealtimeIndexTaskTest extends AbstractIndexerTes
TimeUnit.SECONDS.sleep(5);
// put the timestamps into the query structure
String query_response_template = null;
String query_response_template;
InputStream is = ITRealtimeIndexTaskTest.class.getResourceAsStream(getQueriesResource());
if (null == is) {
throw new ISE("could not open query file: %s", getQueriesResource());

@@ -61,7 +61,7 @@ public class ITAppenderatorDriverRealtimeIndexTaskTest extends AbstractITRealtim
final ServerDiscoverySelector eventReceiverSelector = factory.createSelector(EVENT_RECEIVER_SERVICE_NAME);
eventReceiverSelector.start();
BufferedReader reader = null;
InputStreamReader isr = null;
InputStreamReader isr;
try {
isr = new InputStreamReader(
ITRealtimeIndexTaskTest.class.getResourceAsStream(EVENT_DATA_FILE),

@@ -197,7 +197,6 @@ public class ITKafkaTest extends AbstractIndexerTest
consumerProperties.put("zookeeper.connection.timeout.ms", "15000");
consumerProperties.put("zookeeper.sync.time.ms", "5000");
consumerProperties.put("group.id", Long.toString(System.currentTimeMillis()));
consumerProperties.put("zookeeper.sync.time.ms", "5000");
consumerProperties.put("fetch.message.max.bytes", "1048586");
consumerProperties.put("auto.offset.reset", "smallest");
consumerProperties.put("auto.commit.enable", "false");

@@ -249,20 +248,20 @@ public class ITKafkaTest extends AbstractIndexerTest
segmentsExist = true;
// put the timestamps into the query structure
String query_response_template = null;
String queryResponseTemplate;
InputStream is = ITKafkaTest.class.getResourceAsStream(QUERIES_FILE);
if (null == is) {
throw new ISE("could not open query file: %s", QUERIES_FILE);
}
try {
query_response_template = IOUtils.toString(is, "UTF-8");
queryResponseTemplate = IOUtils.toString(is, "UTF-8");
}
catch (IOException e) {
throw new ISE(e, "could not read query file: %s", QUERIES_FILE);
}
String queryStr = query_response_template
String queryStr = queryResponseTemplate
.replaceAll("%%DATASOURCE%%", DATASOURCE)
// time boundary
.replace("%%TIMEBOUNDARY_RESPONSE_TIMESTAMP%%", TIMESTAMP_FMT.print(dtFirst))

@@ -74,7 +74,7 @@ public class ITRealtimeIndexTaskTest extends AbstractITRealtimeIndexTaskTest
final ServerDiscoverySelector eventReceiverSelector = factory.createSelector(EVENT_RECEIVER_SERVICE_NAME);
eventReceiverSelector.start();
BufferedReader reader = null;
InputStreamReader isr = null;
InputStreamReader isr;
try {
isr = new InputStreamReader(
ITRealtimeIndexTaskTest.class.getResourceAsStream(EVENT_DATA_FILE),
pom.xml

@@ -337,7 +337,7 @@
<dependency>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_annotations</artifactId>
<version>2.2.0</version>
<version>2.3.2</version>
</dependency>
<dependency>
<groupId>com.ibm.icu</groupId>

@@ -1265,8 +1265,8 @@
<arg>-Xep:PreconditionsInvalidPlaceholder:ERROR</arg>
<arg>-Xep:MissingOverride:ERROR</arg>
<arg>-Xep:DefaultCharset:ERROR</arg>
<arg>-Xep:QualifierOrScopeOnInjectMethod:ERROR</arg>
<arg>-Xep:ArgumentParameterSwap</arg>
<arg>-Xep:AssistedInjectAndInjectOnSameConstructor</arg>
<arg>-Xep:AutoFactoryAtInject</arg>
<arg>-Xep:ClassName</arg>

@@ -1289,7 +1289,6 @@
<arg>-Xep:NumericEquality</arg>
<arg>-Xep:ParameterPackage</arg>
<arg>-Xep:ProtoStringFieldReferenceEquality</arg>
<arg>-Xep:QualifierOnMethodWithoutProvides</arg>
<arg>-Xep:UnlockMethod</arg>
</compilerArgs>
</configuration>

@@ -1297,14 +1296,14 @@
<dependency>
<groupId>org.codehaus.plexus</groupId>
<artifactId>plexus-compiler-javac-errorprone</artifactId>
<version>2.8.1</version>
<version>2.8.5</version>
</dependency>
<!-- override plexus-compiler-javac-errorprone's dependency on
Error Prone with the latest version -->
<dependency>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_core</artifactId>
<version>2.0.19</version>
<version>2.3.2</version>
</dependency>
</dependencies>
</plugin>
@@ -95,11 +95,14 @@ public class GroupByQuery extends BaseQuery<Row>
private final VirtualColumns virtualColumns;
private final LimitSpec limitSpec;
@Nullable
private final HavingSpec havingSpec;
@Nullable
private final DimFilter dimFilter;
private final List<DimensionSpec> dimensions;
private final List<AggregatorFactory> aggregatorSpecs;
private final List<PostAggregator> postAggregatorSpecs;
@Nullable
private final List<List<String>> subtotalsSpec;
private final boolean applyLimitPushDown;

@@ -115,9 +118,9 @@ public class GroupByQuery extends BaseQuery<Row>
@JsonProperty("dimensions") List<DimensionSpec> dimensions,
@JsonProperty("aggregations") List<AggregatorFactory> aggregatorSpecs,
@JsonProperty("postAggregations") List<PostAggregator> postAggregatorSpecs,
@JsonProperty("having") HavingSpec havingSpec,
@JsonProperty("having") @Nullable HavingSpec havingSpec,
@JsonProperty("limitSpec") LimitSpec limitSpec,
@JsonProperty("subtotalsSpec") List<List<String>> subtotalsSpec,
@JsonProperty("subtotalsSpec") @Nullable List<List<String>> subtotalsSpec,
@JsonProperty("context") Map<String, Object> context
)
{

@@ -168,12 +171,12 @@ public class GroupByQuery extends BaseQuery<Row>
final DataSource dataSource,
final QuerySegmentSpec querySegmentSpec,
final VirtualColumns virtualColumns,
final DimFilter dimFilter,
final @Nullable DimFilter dimFilter,
final Granularity granularity,
final List<DimensionSpec> dimensions,
final List<AggregatorFactory> aggregatorSpecs,
final List<PostAggregator> postAggregatorSpecs,
final HavingSpec havingSpec,
final @Nullable List<DimensionSpec> dimensions,
final @Nullable List<AggregatorFactory> aggregatorSpecs,
final @Nullable List<PostAggregator> postAggregatorSpecs,
final @Nullable HavingSpec havingSpec,
final LimitSpec limitSpec,
final @Nullable List<List<String>> subtotalsSpec,
final @Nullable Function<Sequence<Row>, Sequence<Row>> postProcessingFn,

@@ -198,7 +201,7 @@ public class GroupByQuery extends BaseQuery<Row>
this.havingSpec = havingSpec;
this.limitSpec = LimitSpec.nullToNoopLimitSpec(limitSpec);
this.subtotalsSpec = verifySubtotalsSpec(subtotalsSpec, dimensions);
this.subtotalsSpec = verifySubtotalsSpec(subtotalsSpec, this.dimensions);
// Verify no duplicate names between dimensions, aggregators, and postAggregators.
// They will all end up in the same namespace in the returned Rows and we can't have them clobbering each other.

@@ -211,7 +214,11 @@ public class GroupByQuery extends BaseQuery<Row>
this.applyLimitPushDown = determineApplyLimitPushDown();
}
private List<List<String>> verifySubtotalsSpec(List<List<String>> subtotalsSpec, List<DimensionSpec> dimensions)
@Nullable
private List<List<String>> verifySubtotalsSpec(
@Nullable List<List<String>> subtotalsSpec,
List<DimensionSpec> dimensions
)
{
// if subtotalsSpec exists then validate that all are subsets of dimensions spec and are in same order.
// For example if we had {D1, D2, D3} in dimensions spec then

@@ -736,20 +743,37 @@ public class GroupByQuery extends BaseQuery<Row>
public static class Builder
{
@Nullable
private static List<List<String>> copySubtotalSpec(@Nullable List<List<String>> subtotalsSpec)
{
if (subtotalsSpec == null) {
return null;
}
return subtotalsSpec.stream().map(ArrayList::new).collect(Collectors.toList());
}
private DataSource dataSource;
private QuerySegmentSpec querySegmentSpec;
private VirtualColumns virtualColumns;
@Nullable
private DimFilter dimFilter;
private Granularity granularity;
@Nullable
private List<DimensionSpec> dimensions;
@Nullable
private List<AggregatorFactory> aggregatorSpecs;
@Nullable
private List<PostAggregator> postAggregatorSpecs;
@Nullable
private HavingSpec havingSpec;
private Map<String, Object> context;
@Nullable
private List<List<String>> subtotalsSpec = null;
@Nullable
private LimitSpec limitSpec = null;
@Nullable
private Function<Sequence<Row>, Sequence<Row>> postProcessingFn;
private List<OrderByColumnSpec> orderByColumnSpecs = new ArrayList<>();
private int limit = Integer.MAX_VALUE;

@@ -787,6 +811,7 @@ public class GroupByQuery extends BaseQuery<Row>
postAggregatorSpecs = builder.postAggregatorSpecs;
havingSpec = builder.havingSpec;
limitSpec = builder.limitSpec;
subtotalsSpec = copySubtotalSpec(builder.subtotalsSpec);
postProcessingFn = builder.postProcessingFn;
limit = builder.limit;
orderByColumnSpecs = new ArrayList<>(builder.orderByColumnSpecs);
@@ -233,7 +233,7 @@ public class BufferArrayGrouper implements IntGrouper
return new CloseableIterator<Entry<Integer>>()
{
int cur = -1;
int cur;
boolean findNext = false;
{

@@ -173,6 +173,7 @@ public class BufferHashGrouper<KeyType> extends AbstractBufferHashGrouper<KeyTyp
}
if (sorted) {
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
final List<Integer> wrappedOffsets = new AbstractList<Integer>()
{
@Override

@@ -254,6 +254,7 @@ public class LimitedBufferHashGrouper<KeyType> extends AbstractBufferHashGrouper
{
final int size = offsetHeap.getHeapSize();
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
final List<Integer> wrappedOffsets = new AbstractList<Integer>()
{
@Override

@@ -72,7 +72,7 @@ public class SpillingGrouper<KeyType> implements Grouper<KeyType>
private final List<File> dictionaryFiles = new ArrayList<>();
private final boolean sortHasNonGroupingFields;
private boolean spillingAllowed = false;
private boolean spillingAllowed;
public SpillingGrouper(
final Supplier<ByteBuffer> bufferSupplier,
@@ -122,7 +122,7 @@ public class StringComparators
int ch1 = str1.codePointAt(pos[0]);
int ch2 = str2.codePointAt(pos[1]);
int result = 0;
int result;
if (isDigit(ch1)) {
result = isDigit(ch2) ? compareNumbers(str1, str2, pos) : -1;

@@ -135,7 +135,7 @@ public class StringComparators
}
}
return str1.length() - str2.length();
return Integer.compare(str1.length(), str2.length());
}
private int compareNumbers(String str0, String str1, int[] pos)
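The second StringComparators hunk swaps the subtraction idiom for Integer.compare. Subtracting two ints inside a comparator overflows when the operands are far apart (string lengths are non-negative, so this particular call happened to be safe, but the branch-based Integer.compare form is always correct and is what the enabled inspections prefer). A minimal, self-contained illustration with raw ints; the class name is made up for the example:

public class IntegerCompareSketch
{
  public static void main(String[] args)
  {
    int a = Integer.MIN_VALUE;
    int b = 1;

    // The subtraction idiom overflows and claims a > b.
    System.out.println(a - b);                 // 2147483647

    // Integer.compare never overflows.
    System.out.println(Integer.compare(a, b)); // -1
  }
}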
@@ -33,7 +33,7 @@ public class ScanQueryLimitRowIterator implements CloseableIterator<ScanResultVa
{
private Yielder<ScanResultValue> yielder;
private String resultFormat;
private long limit = 0;
private long limit;
private long count = 0;
public ScanQueryLimitRowIterator(

@@ -285,22 +285,22 @@ public class SearchQueryQueryToolChest extends QueryToolChest<Result<SearchResul
@Override
public SearchHit apply(@Nullable Object input)
{
String dim = null;
String val = null;
Integer cnt = null;
String dim;
String val;
Integer count;
if (input instanceof Map) {
dim = outputNameMap.get((String) ((Map) input).get("dimension"));
val = (String) ((Map) input).get("value");
cnt = (Integer) ((Map) input).get("count");
count = (Integer) ((Map) input).get("count");
} else if (input instanceof SearchHit) {
SearchHit cached = (SearchHit) input;
dim = outputNameMap.get(cached.getDimension());
val = cached.getValue();
cnt = cached.getCount();
count = cached.getCount();
} else {
throw new IAE("Unknown format [%s]", input.getClass());
}
return new SearchHit(dim, val, cnt);
return new SearchHit(dim, val, count);
}
}
)
@@ -23,7 +23,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.primitives.Longs;
import org.apache.druid.java.util.common.guava.Comparators;
import org.apache.druid.query.Result;
import org.joda.time.DateTime;

@@ -137,7 +136,7 @@ public class SelectResultValueBuilder
protected Queue<EventHolder> instantiatePQueue()
{
int threshold = pagingSpec.getThreshold();
return MinMaxPriorityQueue.orderedBy(descending ? Comparators.inverse(comparator) : comparator)
return MinMaxPriorityQueue.orderedBy(descending ? comparator.reversed() : comparator)
.maximumSize(threshold > 0 ? threshold : Integer.MAX_VALUE)
.create();
}
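SelectResultValueBuilder now builds the descending queue with the JDK's Comparator.reversed() instead of Druid's Comparators.inverse wrapper. A small sketch of the equivalence on a natural-order comparator; names and values are illustrative, not taken from the Druid code:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ReversedComparatorSketch
{
  public static void main(String[] args)
  {
    Comparator<Integer> ascending = Comparator.naturalOrder();

    // Same ordering Comparators.inverse(ascending) produced: largest element first.
    Comparator<Integer> descending = ascending.reversed();

    List<Integer> values = new ArrayList<>(Arrays.asList(3, 1, 2));
    values.sort(descending);
    System.out.println(values); // [3, 2, 1]
  }
}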
@@ -84,7 +84,7 @@ public class AggregateTopNMetricFirstAlgorithm implements TopNAlgorithm<int[], T
PooledTopNAlgorithm singleMetricAlgo = new PooledTopNAlgorithm(storageAdapter, singleMetricQuery, bufferPool);
PooledTopNAlgorithm.PooledTopNParams singleMetricParam = null;
int[] dimValSelector = null;
int[] dimValSelector;
try {
singleMetricParam = singleMetricAlgo.makeInitParams(params.getSelectorPlus(), params.getCursor());
singleMetricAlgo.run(
@@ -21,7 +21,6 @@ package org.apache.druid.query.topn;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.java.util.common.guava.Comparators;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.PostAggregator;
import org.apache.druid.query.dimension.DimensionSpec;

@@ -67,23 +66,7 @@ public class InvertedTopNMetricSpec implements TopNMetricSpec
final List<PostAggregator> postAggregatorSpecs
)
{
return Comparators.inverse(
new Comparator()
{
@Override
public int compare(Object o1, Object o2)
{
// nulls last
if (o1 == null) {
return 1;
}
if (o2 == null) {
return -1;
}
return delegate.getComparator(aggregatorSpecs, postAggregatorSpecs).compare(o1, o2);
}
}
);
return Comparator.nullsFirst(delegate.getComparator(aggregatorSpecs, postAggregatorSpecs).reversed());
}
@Override
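In InvertedTopNMetricSpec the hand-rolled anonymous comparator (delegate order with nulls last, then wrapped in Comparators.inverse) collapses into Comparator.nullsFirst(delegate.reversed()), which yields the same ordering: nulls first, everything else in reverse delegate order. A hedged sketch with a natural-order comparator standing in for the metric comparator; all names here are illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class NullsFirstSketch
{
  public static void main(String[] args)
  {
    Comparator<Integer> delegate = Comparator.naturalOrder();

    // Nulls sort before everything else; non-null values sort in reverse delegate order,
    // matching the old "nulls last" comparator after inversion.
    Comparator<Integer> inverted = Comparator.nullsFirst(delegate.reversed());

    List<Integer> values = new ArrayList<>(Arrays.asList(2, null, 3, 1));
    values.sort(inverted);
    System.out.println(values); // [null, 3, 2, 1]
  }
}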
@@ -78,7 +78,6 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
public class IndexIO
{

@@ -484,14 +483,6 @@ public class IndexIO
}
}
Set<String> colSet = new TreeSet<>();
for (String dimension : index.getAvailableDimensions()) {
colSet.add(dimension);
}
for (String metric : index.getAvailableMetrics()) {
colSet.add(metric);
}
columns.put(
ColumnHolder.TIME_COLUMN_NAME,
new ColumnBuilder()
@@ -47,7 +47,7 @@ public class BlockLayoutColumnarLongsSerializer implements ColumnarLongsSerializ
private int numInserted = 0;
private int numInsertedForNextFlush;
private ByteBuffer endBuffer = null;
private ByteBuffer endBuffer;
BlockLayoutColumnarLongsSerializer(
SegmentWriteOutMedium segmentWriteOutMedium,

@@ -397,7 +397,7 @@ public class Filters
{
private final int bitmapIndexCardinality = bitmapIndex.getCardinality();
private int nextIndex = 0;
private int found = -1;
private int found;
{
found = findNextIndex();
@@ -174,7 +174,7 @@ public class LikeFilter implements Filter
return new IntIterator()
{
int currIndex = startIndex;
int found = -1;
int found;
{
found = findNext();

@@ -322,7 +322,7 @@ public class JavaScriptAggregatorTest
// warmup
int i = 0;
long t = 0;
long t;
while (i < 10000) {
aggregate(selector, aggRhino);
++i;
@@ -137,9 +137,6 @@ public class SelectBinaryFnTest
Assert.assertEquals(res1.getTimestamp(), merged.getTimestamp());
LinkedHashMap<String, Integer> expectedPageIds = Maps.newLinkedHashMap();
expectedPageIds.put(segmentId1, 0);
expectedPageIds.put(segmentId2, 0);
expectedPageIds.put(segmentId2, 1);
expectedPageIds.put(segmentId1, 1);
expectedPageIds.put(segmentId2, 2);
@@ -24,7 +24,6 @@ import com.google.common.collect.Iterables;
import com.google.common.io.CharSource;
import org.apache.commons.lang.StringUtils;
import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.Druids;
import org.apache.druid.query.QueryPlus;

@@ -40,7 +39,6 @@ import org.apache.druid.segment.TestIndex;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.incremental.IncrementalIndexSchema;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.TimelineObjectHolder;
import org.apache.druid.timeline.VersionedIntervalTimeline;
import org.apache.druid.timeline.partition.NoneShardSpec;
import org.apache.druid.timeline.partition.SingleElementPartitionChunk;

@@ -80,7 +78,6 @@ public class TimeBoundaryQueryRunnerTest
);
private static Segment segment0;
private static Segment segment1;
private static List<String> segmentIdentifiers;
public TimeBoundaryQueryRunnerTest(
QueryRunner runner

@@ -157,11 +154,6 @@ public class TimeBoundaryQueryRunnerTest
timeline.add(index0.getInterval(), "v1", new SingleElementPartitionChunk(segment0));
timeline.add(index1.getInterval(), "v1", new SingleElementPartitionChunk(segment1));
segmentIdentifiers = new ArrayList<>();
for (TimelineObjectHolder<String, ?> holder : timeline.lookup(Intervals.of("2011-01-12/2011-01-17"))) {
segmentIdentifiers.add(makeIdentifier(holder.getInterval(), holder.getVersion()));
}
return QueryRunnerTestHelper.makeFilteringQueryRunner(timeline, factory);
}
@@ -57,6 +57,7 @@ import org.apache.druid.query.topn.TopNQueryBuilder;
import org.apache.druid.query.topn.TopNResultValue;
import org.apache.druid.segment.incremental.IncrementalIndex;
import org.apache.druid.segment.writeout.SegmentWriteOutMediumFactory;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@@ -167,6 +168,7 @@ public class SchemalessTestSimpleTest
// @Test TODO: Handling of null values is inconsistent right now, need to make it all consistent and re-enable test
// TODO: Complain to Eric when you see this. It shouldn't be like this...
@Ignore
@SuppressWarnings("unused")
public void testFullOnTopN()
{
@@ -137,7 +137,6 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
checkNotNull(e);
checkSize(e);
long nanos = unit.toNanos(timeout);
boolean added = false;
putLock.lockInterruptibly();
try {
while (currentSize.get() + getBytesSize(e) > capacity) {

@@ -148,16 +147,12 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
}
delegate.add(e);
elementAdded(e);
added = true;
}
finally {
putLock.unlock();
}
if (added) {
signalNotEmpty();
}
return added;
signalNotEmpty();
return true;
}
@Override

@@ -222,12 +217,12 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
if (c == this) {
throw new IllegalArgumentException();
}
int n = 0;
int n;
takeLock.lock();
try {
// elementCount.get provides visibility to first n Nodes
n = Math.min(maxElements, elementCount.get());
if (n < 0) {
if (n <= 0) {
return 0;
}
for (int i = 0; i < n; i++) {

@@ -239,9 +234,7 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
finally {
takeLock.unlock();
}
if (n > 0) {
signalNotFull();
}
signalNotFull();
return n;
}

@@ -250,7 +243,7 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
{
checkNotNull(e);
checkSize(e);
boolean added = false;
boolean added;
putLock.lock();
try {
if (currentSize.get() + getBytesSize(e) > capacity) {

@@ -274,7 +267,7 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
@Override
public E poll()
{
E e = null;
E e;
takeLock.lock();
try {
e = delegate.poll();

@@ -295,7 +288,7 @@ public abstract class BytesBoundedLinkedQueue<E> extends AbstractQueue<E> implem
public E poll(long timeout, TimeUnit unit) throws InterruptedException
{
long nanos = unit.toNanos(timeout);
E e = null;
E e;
takeLock.lockInterruptibly();
try {
while (elementCount.get() == 0) {
@@ -639,7 +639,7 @@ public abstract class SQLMetadataConnector implements MetadataStorageConnector
{
MetadataStorageConnectorConfig connectorConfig = getConfig();
BasicDataSource dataSource = null;
BasicDataSource dataSource;
try {
Properties dbcpProperties = connectorConfig.getDbcpProperties();
@@ -242,7 +242,7 @@ public class EventReceiverFirehoseFactory implements FirehoseFactory<InputRowPar
}
CountingInputStream countingInputStream = new CountingInputStream(in);
Collection<Map<String, Object>> events = null;
Collection<Map<String, Object>> events;
try {
events = objectMapper.readValue(
countingInputStream,
@@ -154,7 +154,7 @@ public class ReplicationThrottler
lifetime = maxLifetime;
lifetimes.put(tier, lifetime);
}
lifetimes.put(tier, --lifetime);
lifetimes.put(tier, lifetime - 1);
}
public void resetLifetime(String tier)

@@ -47,7 +47,7 @@ public class SegmentReplicantLookup
if (numReplicants == null) {
numReplicants = 0;
}
segmentsInCluster.put(segment.getIdentifier(), server.getTier(), ++numReplicants);
segmentsInCluster.put(segment.getIdentifier(), server.getTier(), numReplicants + 1);
}
// Also account for queued segments

@@ -56,7 +56,7 @@ public class SegmentReplicantLookup
if (numReplicants == null) {
numReplicants = 0;
}
loadingSegments.put(segment.getIdentifier(), server.getTier(), ++numReplicants);
loadingSegments.put(segment.getIdentifier(), server.getTier(), numReplicants + 1);
}
}
}
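ReplicationThrottler and SegmentReplicantLookup now pass lifetime - 1 and numReplicants + 1 instead of applying -- or ++ to a local that is never read again; the stored value is identical, but the side-effect-free expression is the kind of pattern the newly enabled inspections favor. A minimal sketch of the same shape; the map contents and key are made up for the example:

import java.util.HashMap;
import java.util.Map;

public class IncrementArgumentSketch
{
  public static void main(String[] args)
  {
    Map<String, Integer> lifetimes = new HashMap<>();
    lifetimes.put("hot", 15);

    Integer lifetime = lifetimes.get("hot");

    // Equivalent to lifetimes.put("hot", --lifetime): the decremented local is never
    // read again, so the expression form stores the same value without the side effect.
    lifetimes.put("hot", lifetime - 1);

    System.out.println(lifetimes); // {hot=14}
  }
}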
@@ -276,7 +276,7 @@ public class DatasourcesResource
return Response.noContent().build();
}
final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
final Comparator<Interval> comparator = Comparators.intervalsByStartThenEnd().reversed();
if (full != null) {
final Map<Interval, Map<String, Object>> retVal = new TreeMap<>(comparator);

@@ -342,7 +342,7 @@ public class DatasourcesResource
return Response.noContent().build();
}
final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
final Comparator<Interval> comparator = Comparators.intervalsByStartThenEnd().reversed();
if (full != null) {
final Map<Interval, Map<String, Object>> retVal = new TreeMap<>(comparator);
for (DataSegment dataSegment : dataSource.getSegments()) {

@@ -385,7 +385,7 @@ public class DatasourcesResource
return Response.ok(retVal).build();
}
final Set<String> retVal = new TreeSet<>(Comparators.inverse(String.CASE_INSENSITIVE_ORDER));
final Set<String> retVal = new TreeSet<>(String.CASE_INSENSITIVE_ORDER.reversed());
for (DataSegment dataSegment : dataSource.getSegments()) {
if (theInterval.contains(dataSegment.getInterval())) {
retVal.add(dataSegment.getIdentifier());
@@ -70,7 +70,7 @@ public class IntervalsResource
@Produces(MediaType.APPLICATION_JSON)
public Response getIntervals(@Context final HttpServletRequest req)
{
final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
final Comparator<Interval> comparator = Comparators.intervalsByStartThenEnd().reversed();
final Set<ImmutableDruidDataSource> datasources = InventoryViewUtils.getSecuredDataSources(
req,
serverInventoryView,

@@ -80,11 +80,7 @@ public class IntervalsResource
final Map<Interval, Map<String, Map<String, Object>>> retVal = new TreeMap<>(comparator);
for (ImmutableDruidDataSource dataSource : datasources) {
for (DataSegment dataSegment : dataSource.getSegments()) {
Map<String, Map<String, Object>> interval = retVal.get(dataSegment.getInterval());
if (interval == null) {
Map<String, Map<String, Object>> tmp = new HashMap<>();
retVal.put(dataSegment.getInterval(), tmp);
}
retVal.computeIfAbsent(dataSegment.getInterval(), i -> new HashMap<>());
setProperties(retVal, dataSource, dataSegment);
}
}

@@ -109,18 +105,14 @@ public class IntervalsResource
authorizerMapper
);
final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
final Comparator<Interval> comparator = Comparators.intervalsByStartThenEnd().reversed();
if (full != null) {
final Map<Interval, Map<String, Map<String, Object>>> retVal = new TreeMap<>(comparator);
for (ImmutableDruidDataSource dataSource : datasources) {
for (DataSegment dataSegment : dataSource.getSegments()) {
if (theInterval.contains(dataSegment.getInterval())) {
Map<String, Map<String, Object>> dataSourceInterval = retVal.get(dataSegment.getInterval());
if (dataSourceInterval == null) {
Map<String, Map<String, Object>> tmp = new HashMap<>();
retVal.put(dataSegment.getInterval(), tmp);
}
retVal.computeIfAbsent(dataSegment.getInterval(), k -> new HashMap<>());
setProperties(retVal, dataSource, dataSegment);
}
}
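IntervalsResource replaces the get / null-check / put sequence with Map.computeIfAbsent, which creates the nested map only when the key is missing. A minimal sketch of the two shapes side by side; the keys below are illustrative interval strings, not values from the Druid code:

import java.util.HashMap;
import java.util.Map;

public class ComputeIfAbsentSketch
{
  public static void main(String[] args)
  {
    Map<String, Map<String, Object>> retVal = new HashMap<>();

    // Old shape: look up, null-check, then put an empty map.
    Map<String, Object> existing = retVal.get("2011-01-12/2011-01-13");
    if (existing == null) {
      retVal.put("2011-01-12/2011-01-13", new HashMap<>());
    }

    // New shape: create the nested map only when the key is missing, in one call.
    retVal.computeIfAbsent("2011-01-13/2011-01-14", k -> new HashMap<>());

    System.out.println(retVal.keySet());
  }
}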
@@ -482,7 +482,7 @@ public class LookupCoordinatorResource
)
{
try {
Collection<String> tiers = null;
Collection<String> tiers;
if (discover) {
tiers = lookupCoordinatorManager.discoverTiers();
} else {
@@ -57,7 +57,7 @@ public class JettyServerInitUtils
for (ServletFilterHolder servletFilterHolder : extensionFilters) {
// Check the Filter first to guard against people who don't read the docs and return the Class even
// when they have an instance.
FilterHolder holder = null;
FilterHolder holder;
if (servletFilterHolder.getFilter() != null) {
holder = new FilterHolder(servletFilterHolder.getFilter());
} else if (servletFilterHolder.getFilterClass() != null) {
@@ -24,7 +24,6 @@ import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.emitter.EmittingLogger;
import org.apache.druid.query.QueryInterruptedException;
import org.apache.druid.server.DruidNode;
import org.eclipse.jetty.server.Response;
import javax.servlet.Filter;
import javax.servlet.FilterChain;

@@ -94,7 +93,7 @@ public class PreResponseAuthorizationCheckFilter implements Filter
);
}
if (authInfoChecked != null && !authInfoChecked && response.getStatus() != Response.SC_FORBIDDEN) {
if (authInfoChecked != null && !authInfoChecked && response.getStatus() != HttpServletResponse.SC_FORBIDDEN) {
handleAuthorizationCheckError(
"Request's authorization check failed but status code was not 403.",
request,

@@ -134,7 +133,7 @@ public class PreResponseAuthorizationCheckFilter implements Filter
);
unauthorizedError.setStackTrace(new StackTraceElement[0]);
OutputStream out = response.getOutputStream();
sendJsonError(response, Response.SC_UNAUTHORIZED, jsonMapper.writeValueAsString(unauthorizedError), out);
sendJsonError(response, HttpServletResponse.SC_UNAUTHORIZED, jsonMapper.writeValueAsString(unauthorizedError), out);
out.close();
return;
}

@@ -157,7 +156,7 @@ public class PreResponseAuthorizationCheckFilter implements Filter
throw new ISE(errorMsg);
} else {
try {
servletResponse.sendError(Response.SC_FORBIDDEN);
servletResponse.sendError(HttpServletResponse.SC_FORBIDDEN);
}
catch (Exception e) {
throw new RuntimeException(e);
@@ -23,7 +23,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.java.util.common.logger.Logger;
import org.apache.druid.query.QueryInterruptedException;
import org.apache.druid.server.DruidNode;
import org.eclipse.jetty.server.Response;
import javax.servlet.Filter;
import javax.servlet.FilterChain;

@@ -79,7 +78,7 @@ public class SecuritySanityCheckFilter implements Filter
AuthenticationResult result = (AuthenticationResult) request.getAttribute(AuthConfig.DRUID_AUTHENTICATION_RESULT);
if (authInfoChecked != null || result != null || allowUnsecured != null) {
sendJsonError(httpResponse, Response.SC_FORBIDDEN, unauthorizedMessage, out);
sendJsonError(httpResponse, HttpServletResponse.SC_FORBIDDEN, unauthorizedMessage, out);
out.close();
return;
}
@@ -2875,6 +2875,9 @@ public class CachingClusteredClientTest
@Override
public boolean equals(Object o)
{
if (!(o instanceof DataSegment)) {
return false;
}
return baseSegment.equals(o);
}
@@ -183,7 +183,7 @@ public class BytesBoundedLinkedQueueTest
Assert.assertFalse(q.offer(new TestObject(2), delayMS, TimeUnit.MILLISECONDS));
}
// @Test
@Test
public void testConcurrentOperations() throws Exception
{
final BlockingQueue<TestObject> q = getQueue(Integer.MAX_VALUE);

@@ -240,8 +240,7 @@ public class BytesBoundedLinkedQueueTest
public Boolean call()
{
while (!stopTest.get()) {
System.out
.println("drained elements : " + q.drainTo(new ArrayList<TestObject>(), Integer.MAX_VALUE));
q.drainTo(new ArrayList<>(), Integer.MAX_VALUE);
}
return true;
}
@@ -172,8 +172,8 @@ public class HybridCacheTest
Assert.assertEquals(Sets.newHashSet(key3), res.keySet());
Assert.assertArrayEquals(value3, res.get(key3));
Assert.assertEquals(++hits, cache.getStats().getNumHits());
Assert.assertEquals(++misses, cache.getStats().getNumMisses());
Assert.assertEquals(hits + 1, cache.getStats().getNumHits());
Assert.assertEquals(misses + 1, cache.getStats().getNumMisses());
}
}
}
@@ -131,7 +131,7 @@ public class MemcachedCacheBenchmark extends SimpleBenchmark
public long timeGetObject(int reps)
{
byte[] bytes = null;
byte[] bytes;
long count = 0;
for (int i = 0; i < reps; i++) {
for (int k = 0; k < objectCount; ++k) {
@@ -30,9 +30,6 @@ import org.easymock.EasyMock;
import org.junit.Assert;
import org.junit.Test;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
public class AppenderatorPlumberTest
{
private final AppenderatorPlumber plumber;

@@ -94,43 +91,29 @@ public class AppenderatorPlumberTest
@Test
public void testSimpleIngestion() throws Exception
{
final ConcurrentMap<String, String> commitMetadata = new ConcurrentHashMap<>();
Appenderator appenderator = appenderatorTester.getAppenderator();
// startJob
Assert.assertEquals(null, plumber.startJob());
// getDataSource
Assert.assertEquals(AppenderatorTester.DATASOURCE,
appenderator.getDataSource());
Assert.assertEquals(AppenderatorTester.DATASOURCE, appenderator.getDataSource());
InputRow[] rows = new InputRow[] {AppenderatorTest.IR("2000", "foo", 1),
AppenderatorTest.IR("2000", "bar", 2), AppenderatorTest.IR("2000", "qux", 4)};
// add
commitMetadata.put("x", "1");
Assert.assertEquals(
1,
plumber.add(rows[0], null).getRowCount());
Assert.assertEquals(1, plumber.add(rows[0], null).getRowCount());
commitMetadata.put("x", "2");
Assert.assertEquals(
2,
plumber.add(rows[1], null).getRowCount());
Assert.assertEquals(2, plumber.add(rows[1], null).getRowCount());
commitMetadata.put("x", "3");
Assert.assertEquals(
3,
plumber.add(rows[2], null).getRowCount());
Assert.assertEquals(3, plumber.add(rows[2], null).getRowCount());
Assert.assertEquals(1, plumber.getSegmentsView().size());
SegmentIdentifier si = plumber.getSegmentsView().values().toArray(new SegmentIdentifier[0])[0];
Assert.assertEquals(3,
appenderator.getRowCount(si));
Assert.assertEquals(3, appenderator.getRowCount(si));
appenderator.clear();
Assert.assertTrue(appenderator.getSegments().isEmpty());
@@ -307,14 +307,13 @@ public class CuratorDruidCoordinatorTest extends CuratorTestBase
DataSegment segmentToMove = sourceSegments.get(2);
List<String> sourceSegKeys = new ArrayList<>();
List<String> destSegKeys = new ArrayList<>();
for (DataSegment segment : sourceSegments) {
sourceSegKeys.add(announceBatchSegmentsForServer(source, ImmutableSet.of(segment), zkPathsConfig, jsonMapper));
}
for (DataSegment segment : destinationSegments) {
destSegKeys.add(announceBatchSegmentsForServer(dest, ImmutableSet.of(segment), zkPathsConfig, jsonMapper));
announceBatchSegmentsForServer(dest, ImmutableSet.of(segment), zkPathsConfig, jsonMapper);
}
Assert.assertTrue(timing.forWaiting().awaitLatch(segmentViewInitLatch));
@@ -94,7 +94,6 @@ public class JavaScriptTieredBrokerSelectorStrategyTest
public void testGetBrokerServiceName()
{
final LinkedHashMap<String, String> tierBrokerMap = new LinkedHashMap<>();
tierBrokerMap.put("fast", "druid/fastBroker");
tierBrokerMap.put("fast", "druid/broker");
tierBrokerMap.put("slow", "druid/slowBroker");
@@ -117,7 +117,6 @@ class CoordinatorJettyServerInitializer implements JettyServerInitializer
final ObjectMapper jsonMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
final AuthenticatorMapper authenticatorMapper = injector.getInstance(AuthenticatorMapper.class);
List<Authenticator> authenticators = null;
AuthenticationUtils.addSecuritySanityCheckFilter(root, jsonMapper);
// perform no-op authorization for these resources

@@ -128,7 +127,7 @@ class CoordinatorJettyServerInitializer implements JettyServerInitializer
AuthenticationUtils.addNoopAuthorizationFilters(root, CliOverlord.UNSECURED_PATHS);
}
authenticators = authenticatorMapper.getAuthenticatorChain();
List<Authenticator> authenticators = authenticatorMapper.getAuthenticatorChain();
AuthenticationUtils.addAuthenticationFilterChain(root, authenticators);
AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions());
@@ -73,6 +73,7 @@ public class PullDependencies implements Runnable
{
private static final Logger log = new Logger(PullDependencies.class);
@SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
private static final Set<String> exclusions = new HashSet<>(
/*
@@ -93,14 +93,13 @@ public class QueryJettyServerInitializer implements JettyServerInitializer
final ObjectMapper jsonMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
final AuthenticatorMapper authenticatorMapper = injector.getInstance(AuthenticatorMapper.class);
List<Authenticator> authenticators = null;
AuthenticationUtils.addSecuritySanityCheckFilter(root, jsonMapper);
// perform no-op authorization for these resources
AuthenticationUtils.addNoopAuthorizationFilters(root, UNSECURED_PATHS);
AuthenticationUtils.addNoopAuthorizationFilters(root, authConfig.getUnsecuredPaths());
authenticators = authenticatorMapper.getAuthenticatorChain();
List<Authenticator> authenticators = authenticatorMapper.getAuthenticatorChain();
AuthenticationUtils.addAuthenticationFilterChain(root, authenticators);
AuthenticationUtils.addAllowOptionsFilter(root, authConfig.isAllowUnauthenticatedHttpOptions());
@@ -20,7 +20,6 @@
package org.apache.druid.sql.avatica;
import com.google.common.base.Preconditions;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSortedMap;

@@ -29,7 +28,6 @@ import com.google.common.collect.Sets;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.java.util.common.logger.Logger;
import javax.annotation.Nullable;
import javax.annotation.concurrent.GuardedBy;
import java.util.HashMap;
import java.util.Map;

@@ -85,17 +83,9 @@ public class DruidConnection
// remove sensitive fields from the context, only the connection's context needs to have authentication
// credentials
Map<String, Object> sanitizedContext = new HashMap<>();
sanitizedContext = Maps.filterEntries(
Map<String, Object> sanitizedContext = Maps.filterEntries(
context,
new Predicate<Map.Entry<String, Object>>()
{
@Override
public boolean apply(@Nullable Map.Entry<String, Object> input)
{
return !SENSITIVE_CONTEXT_FIELDS.contains(input.getKey());
}
}
e -> !SENSITIVE_CONTEXT_FIELDS.contains(e.getKey())
);
final DruidStatement statement = new DruidStatement(