From ac05836833d6d945d2446bd72a1443dd5d0d6a52 Mon Sep 17 00:00:00 2001
From: Xavier Léauté
* InputRows are produced as fast as requested, so this can be used to determine the
* upper rate of ingest if sleepUsec is set to 0; nTokens specifies how many associated
@@ -55,8 +56,8 @@ import static java.lang.Thread.sleep;
* the moment an event is delivered.)
* Values are offset by adding the modulus of the token number to the random number
* so that token values have distinct, non-overlapping ranges.
- *
+ *
* [{
@@ -82,7 +83,7 @@ import static java.lang.Thread.sleep;
* "basePersistDirectory" : "/tmp/realtime/basePersist"
* }
* }]
*
- *
+ *
* Example query using POST to /druid/v2/ (where UTC date and time MUST include the current hour):
*
* {
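
The offsetting scheme described in this factory's javadoc (per-token offsets producing disjoint value ranges) can be pictured with a short sketch; the range width and variable names here are assumptions, not the factory's actual fields:

    import java.util.Random;

    // each token draws from [token * range, (token + 1) * range), so ranges never overlap
    int nTokens = 4;    // assumed
    int range = 1000;   // assumed width of each token's range
    Random rand = new Random();
    for (int token = 0; token < nTokens; token++) {
      long value = (long) token * range + rand.nextInt(range);
    }
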
diff --git a/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java b/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
index 6755ee0a933..cc222d8098b 100644
--- a/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
+++ b/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
@@ -58,18 +58,11 @@ import static java.lang.Thread.sleep;
* with timestamps along with ??.
* The generated tuples have the form (timestamp, ????)
* where the timestamp is from the twitter event.
- *
- *
- *
+ *
* Example spec file:
- *
- *
- *
+ *
* Example query using POST to /druid/v2/?w (where w is an arbitrary parameter and the date and time
* is UTC):
- *
- *
- *
*
* Notes on twitter.com HTTP (REST) API: v1.0 will be disabled around 2013-03 so v1.1 should be used;
* twitter4j 3.0 (not yet released) will support the v1.1 api.
diff --git a/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java b/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java
index fc06968f870..b44fbd524e1 100644
--- a/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java
+++ b/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java
@@ -253,9 +253,7 @@ public class ApproximateHistogram
}

/**
- * Returns a string representation of the actual bin counts
- *
- * @return
+ * @return a string representation of the actual bin counts
*/
protected String getBinsString()
{
@@ -385,7 +383,7 @@ public class ApproximateHistogram
/**
* Merges the bin in the given position with the next bin
*
- * @param index index of the bin to merge, index must satisfy 0 <= index < binCount - 1
+ * @param index index of the bin to merge, index must satisfy 0 &lt;= index &lt; binCount - 1
*/
protected void merge(final int index)
{
@@ -506,9 +504,9 @@ public class ApproximateHistogram
/**
* Copies histogram h into the current histogram.
*
- * @param h
+ * @param h ApproximateHistogram to copy
*
- * @return
+ * @return this histogram
*/
public ApproximateHistogram copy(ApproximateHistogram h)
{
@@ -815,20 +813,20 @@ public class ApproximateHistogram
/**
* mergeBins performs the given number of bin merge operations on the given histogram
- *
+ *
* It repeatedly merges the two closest bins until it has performed the requested number of merge operations.
* Merges are done in-place and unused bins have unknown state
- *
+ *
* next / prev maintains a doubly-linked list of valid bin indices into the mergedBins array.
- *
+ *
* Fast operation is achieved by building a min-heap of the deltas as opposed to repeatedly
* scanning the array of deltas to find the minimum. A reverse index into the heap is maintained
* to allow deleting and updating of specific deltas.
- *
+ *
* next and prev arrays are used to maintain indices to the previous / next valid bin from a given bin index
- *
+ *
* Its effect is equivalent to running the following code:
- *
+ *
* ApproximateHistogram merged = new ApproximateHistogram(mergedBinCount, mergedPositions, mergedBins);
*
@@ -1104,7 +1102,7 @@ public class ApproximateHistogram
/**
* Returns a byte-array representation of this ApproximateHistogram object
*
- * @return
+ * @return byte array representation
*/
@JsonValue
public byte[] toBytes()
@@ -1181,7 +1179,7 @@ public class ApproximateHistogram
/**
* Writes the representation of this ApproximateHistogram object to the given byte-buffer
*
- * @param buf
+ * @param buf ByteBuffer to write the ApproximateHistogram to
*/
public void toBytes(ByteBuffer buf)
{
@@ -1196,10 +1194,10 @@ public class ApproximateHistogram
/**
* Writes the dense representation of this ApproximateHistogram object to the given byte-buffer
- *
+ *
* Requires 16 + 12 * size bytes of storage
*
- * @param buf
+ * @param buf ByteBuffer to write the ApproximateHistogram to
*/
public void toBytesDense(ByteBuffer buf)
{
@@ -1217,10 +1215,10 @@ public class ApproximateHistogram
/**
* Writes the sparse representation of this ApproximateHistogram object to the given byte-buffer
- *
+ *
* Requires 16 + 12 * binCount bytes of storage
*
- * @param buf ByteBuffer to write object to
+ * @param buf ByteBuffer to write the ApproximateHistogram to
*/
public void toBytesSparse(ByteBuffer buf)
{
@@ -1239,10 +1237,10 @@ public class ApproximateHistogram
/**
* Returns a compact byte-buffer representation of this ApproximateHistogram object
* storing actual values as opposed to histogram bins
- *
- * Requires 3 + 4 * count bytes of storage with count <= 127
+ *
+ * Requires 3 + 4 * count bytes of storage with count &lt;= 127
*
- * @param buf
+ * @param buf ByteBuffer to write the ApproximateHistogram to
*/
public void toBytesCompact(ByteBuffer buf)
{
@@ -1285,9 +1283,9 @@ public class ApproximateHistogram
/**
* Constructs an ApproximateHistogram object from the given byte-array representation
*
- * @param bytes
+ * @param bytes byte array to construct an ApproximateHistogram from
*
- * @return
+ * @return ApproximateHistogram constructed from the given byte array
*/
public static ApproximateHistogram fromBytes(byte[] bytes)
{
@@ -1298,9 +1296,9 @@ public class ApproximateHistogram
/**
* Constructs an ApproximateHistogram object from the given dense byte-buffer representation
*
- * @param buf
+ * @param buf ByteBuffer to construct an ApproximateHistogram from
*
- * @return
+ * @return ApproximateHistogram constructed from the given ByteBuffer
*/
public static ApproximateHistogram fromBytesDense(ByteBuffer buf)
{
@@ -1324,9 +1322,9 @@ public class ApproximateHistogram
/**
* Constructs an ApproximateHistogram object from the given sparse byte-buffer representation
*
- * @param buf
+ * @param buf ByteBuffer to construct an ApproximateHistogram from
*
- * @return
+ * @return ApproximateHistogram constructed from the given ByteBuffer
*/
public static ApproximateHistogram fromBytesSparse(ByteBuffer buf)
{
@@ -1352,9 +1350,9 @@ public class ApproximateHistogram
/**
* Constructs an ApproximateHistogram object from the given compact byte-buffer representation
*
- * @param buf
+ * @param buf ByteBuffer to construct an ApproximateHistogram from
*
- * @return
+ * @return ApproximateHistogram constructed from the given ByteBuffer
*/
public static ApproximateHistogram fromBytesCompact(ByteBuffer buf)
{
@@ -1428,9 +1426,9 @@ public class ApproximateHistogram
/**
* Constructs an ApproximateHistogram object from the given byte-buffer representation
*
- * @param buf
+ * @param buf ByteBuffer to construct an ApproximateHistogram from
*
- * @return
+ * @return ApproximateHistogram constructed from the given ByteBuffer
*/
public static ApproximateHistogram fromBytes(ByteBuffer buf)
{
@@ -1454,7 +1452,7 @@ public class ApproximateHistogram
/**
* Returns the approximate number of items less than or equal to b in the histogram
*
- * @param b
+ * @param b the cutoff
*
* @return the approximate number of items less than or equal to b
*/
@@ -1521,7 +1519,7 @@ public class ApproximateHistogram
* probabilities = [.5f] returns [median]
* probabilities = [.25f, .5f, .75f] returns the quartiles, [25%ile, median, 75%ile]
*
- * @param probabilities
+ * @param probabilities array of probabilities
*
* @return an array of length probabilities.length representing the approximate sample quantiles
* corresponding to the given probabilities
@@ -1584,9 +1582,9 @@ public class ApproximateHistogram
/**
* Computes a visual representation of the approximate histogram with bins laid out according to the given breaks
*
- * @param breaks
+ * @param breaks breaks defining the histogram bins
*
- * @return
+ * @return visual representation of the histogram
*/
public Histogram toHistogram(final float[] breaks)
{
@@ -1607,7 +1605,7 @@ public class ApproximateHistogram
*
* @param size number of equal-sized bins to divide the histogram into
*
- * @return
+ * @return visual representation of the histogram
*/
public Histogram toHistogram(int size)
{
@@ -1629,7 +1627,7 @@ public class ApproximateHistogram
* @param bucketSize the size of each bucket
* @param offset the location of one breakpoint
*
- * @return
+ * @return visual representation of the histogram
*/
public Histogram toHistogram(final float bucketSize, final float offset)
{
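
The min-heap strategy documented in mergeBins above reduces to the following loop; this is a rough sketch reusing the names from that comment (positions, binCount, numMerge), not the actual in-place primitive-array implementation:

    import java.util.PriorityQueue;

    // order candidate merges by the gap between adjacent bin positions
    PriorityQueue<float[]> heap = new PriorityQueue<>((a, b) -> Float.compare(a[0], b[0]));
    for (int i = 0; i < binCount - 1; i++) {
      heap.add(new float[]{positions[i + 1] - positions[i], i});
    }
    for (int n = 0; n < numMerge; n++) {
      int left = (int) heap.poll()[1]; // left bin of the closest pair
      // merge bins `left` and `left + 1`: weighted-average position, summed counts;
      // then re-queue the gaps to the new neighbors -- the reverse index mentioned
      // above is what lets the real code update stale heap entries in place
    }
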
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java
index 818dc9b3868..3012766f509 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java
@@ -81,13 +81,13 @@ import java.util.Map;
/**
* Determines appropriate ShardSpecs for a job by determining whether or not partitioning is necessary, and if so,
* choosing the best dimension that satisfies the criteria:
- *
+ *
*
* A -> B -> Aa -> Ab
+ *
* That is, the two sub queryables for A would run *after* B is run, effectively meaning that the results for B
* must be fully cached in memory before the results for Aa and Ab are computed.
*/
diff --git a/processing/src/main/java/io/druid/query/Druids.java b/processing/src/main/java/io/druid/query/Druids.java
index ff03e8db7f7..fc475a2ba90 100644
--- a/processing/src/main/java/io/druid/query/Druids.java
+++ b/processing/src/main/java/io/druid/query/Druids.java
@@ -59,9 +59,9 @@ public class Druids
/**
* A Builder for AndDimFilter.
- *
+ *
* Required: fields() must be called before build()
- *
+ *
* Usage example:
*
* AndDimFilter andDimFilter = Druids.newAndDimFilterBuilder()
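
The usage examples in these builder javadocs are truncated in this excerpt; a chain like the one above would typically complete along these lines, with hypothetical sub-filters:

    AndDimFilter andDimFilter = Druids.newAndDimFilterBuilder()
        .fields(Arrays.<DimFilter>asList(selFilter1, selFilter2)) // fields() before build()
        .build();
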
@@ -105,9 +105,9 @@ public class Druids
/**
* A Builder for OrDimFilter.
- *
+ *
* Required: fields() must be called before build()
- *
+ *
* Usage example:
*
* OrDimFilter orDimFilter = Druids.newOrDimFilterBuilder()
@@ -160,9 +160,9 @@ public class Druids
/**
* A Builder for NotDimFilter.
- *
+ *
* Required: field() must be called before build()
- *
+ *
* Usage example:
*
* NotDimFilter notDimFilter = Druids.newNotDimFilterBuilder()
@@ -206,9 +206,9 @@ public class Druids
/**
* A Builder for SelectorDimFilter.
- *
+ *
* Required: dimension() and value() must be called before build()
- *
+ *
* Usage example:
*
* Selector selDimFilter = Druids.newSelectorDimFilterBuilder()
@@ -285,10 +285,10 @@ public class Druids
/**
* A Builder for TimeseriesQuery.
- *
+ *
* Required: dataSource(), intervals(), and aggregators() must be called before build()
* Optional: filters(), granularity(), postAggregators(), and context() can be called before build()
- *
+ *
* Usage example:
*
* TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
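
A build satisfying the Required line above might look like this sketch; the argument forms (string interval, aggregator list) are assumptions:

    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource("sample_datasource")          // required
        .intervals("2012-01-01/2012-01-02")       // required
        .aggregators(Arrays.<AggregatorFactory>asList(
            new CountAggregatorFactory("rows")))  // required
        .build();
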
@@ -483,11 +483,11 @@ public class Druids
/**
* A Builder for SearchQuery.
- *
+ *
* Required: dataSource(), intervals(), dimensions() and query() must be called before build()
- *
+ *
* Optional: filters(), granularity(), and context() can be called before build()
- *
+ *
* Usage example:
*
* SearchQuery query = Druids.newSearchQueryBuilder()
@@ -678,9 +678,9 @@ public class Druids
/**
* A Builder for TimeBoundaryQuery.
- *
+ *
* Required: dataSource() must be called before build()
- *
+ *
* Usage example:
*
* TimeBoundaryQuery query = new MaxTimeQueryBuilder()
@@ -774,12 +774,12 @@ public class Druids
/**
* A Builder for Result.
- *
+ *
* Required: timestamp() and value() must be called before build()
- *
+ *
* Usage example:
*
- * Result result = Druids.newResultBuilder()
+ * Result<T> result = Druids.newResultBuilder()
* .timestamp(egDateTime)
* .value(egValue)
* .build();
@@ -840,9 +840,9 @@ public class Druids
/**
* A Builder for SegmentMetadataQuery.
- *
+ *
* Required: dataSource(), intervals() must be called before build()
- *
+ *
* Usage example:
*
* SegmentMetadataQuery query = new SegmentMetadataQueryBuilder()
@@ -948,9 +948,9 @@ public class Druids
/**
* A Builder for SelectQuery.
- *
+ *
* Required: dataSource(), intervals() must be called before build()
- *
+ *
* Usage example:
*
* SelectQuery query = new SelectQueryBuilder()
diff --git a/processing/src/main/java/io/druid/query/QuerySegmentWalker.java b/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
index 8e8c6b8c0fe..2ec96f52070 100644
--- a/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
+++ b/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
@@ -29,6 +29,8 @@ public interface QuerySegmentWalker
* Gets the Queryable for a given interval, the Queryable returned can be any version(s) or partitionNumber(s)
* such that it represents the interval.
*
+ * @param <T> query result type
+ * @param query the query to find a Queryable for
* @param intervals the intervals to find a Queryable for
* @return a Queryable object that represents the interval
*/
@@ -36,8 +38,10 @@ public interface QuerySegmentWalker
/**
* Gets the Queryable for a given list of SegmentSpecs.
- * exist.
*
+ * @param <T> the query result type
+ * @param query the query to return a Queryable for
+ * @param specs the list of SegmentSpecs to find a Queryable for
* @return the Queryable object with the given SegmentSpecs
*/
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs);
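
As a rough usage sketch of this interface (the run() signature is an assumption for this era of the codebase):

    QueryRunner<T> runner = walker.getQueryRunnerForIntervals(query, intervals);
    Sequence<T> results = runner.run(query); // lazily evaluated sequence of results
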
diff --git a/processing/src/main/java/io/druid/query/QueryToolChest.java b/processing/src/main/java/io/druid/query/QueryToolChest.java
index d2722c622be..7d42eda6792 100644
--- a/processing/src/main/java/io/druid/query/QueryToolChest.java
+++ b/processing/src/main/java/io/druid/query/QueryToolChest.java
@@ -40,9 +40,9 @@ public abstract class QueryToolChest<ResultType, QueryType extends Query<ResultType>>
public abstract Sequence<ResultType> mergeSequences(Sequence<Sequence<ResultType>> seqOfSequences);
diff --git a/processing/src/main/java/io/druid/query/TimewarpOperator.java b/processing/src/main/java/io/druid/query/TimewarpOperator.java
index 49a8fb1ed85..66df6e3cf55 100644
--- a/processing/src/main/java/io/druid/query/TimewarpOperator.java
+++ b/processing/src/main/java/io/druid/query/TimewarpOperator.java
@@ -134,7 +134,7 @@ public class TimewarpOperator implements PostProcessingOperator
/**
* Map time t into the last `period` ending within `dataInterval`
*
- * @param t
+ * @param t the current time to be mapped into `dataInterval`
* @return the offset between the mapped time and time t
*/
protected long computeOffset(final long t)
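
For a fixed-length period the mapping described above is plain integer arithmetic; a minimal sketch (the real implementation works with Joda-Time periods and origins):

    // shift t forward by whole periods so it lands in the last period ending
    // within the data interval; returns the offset rather than the mapped time
    static long computeOffsetFixed(long t, long periodMillis, long intervalEndMillis)
    {
      long wholePeriods = (intervalEndMillis - t) / periodMillis;
      return wholePeriods * periodMillis; // mapped time = t + offset
    }
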
diff --git a/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
index ad355c71b6b..bbfa6ae3dab 100644
--- a/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
+++ b/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
@@ -26,9 +26,9 @@ import java.util.List;
/**
* Processing related interface
- *
+ *
* An AggregatorFactory is an object that knows how to generate an Aggregator using a ColumnSelectorFactory.
- *
+ *
* This is useful as an abstraction to allow Aggregator classes to be written in terms of MetricSelector objects
* without making any assumptions about how they are pulling values out of the base data. That is, the data is
* provided to the Aggregator through the MetricSelector object, so whatever creates that object gets to choose how
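
The abstraction described here can be illustrated with a toy aggregator; the selector interface below is a stand-in invented for the sketch, not the project's actual MetricSelector:

    interface FloatSelector { float get(); }

    // the aggregator only ever sees values through the selector, never the base data
    class SumAggregator
    {
      private final FloatSelector selector;
      private double sum = 0;

      SumAggregator(FloatSelector selector) { this.selector = selector; }

      void aggregate() { sum += selector.get(); } // whoever built the selector chose the source
      double getSum() { return sum; }
    }
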
diff --git a/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java b/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java
index 61617c42dd5..0b07e2745e8 100644
--- a/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java
+++ b/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java
@@ -37,6 +37,8 @@ public class AggregatorUtil
* such that all the dependencies of any given aggregator should occur before that aggregator.
* See AggregatorUtilTest.testOutOfOrderPruneDependentPostAgg for example.
* @param postAggName name of the postAgg on which dependency is to be calculated
+ *
+ * @return the list of dependent postAggregators
*/
public static List<PostAggregator> pruneDependentPostAgg(List<PostAggregator> postAggregatorList, String postAggName)
{
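
The pruning can be sketched as a backwards walk that keeps only transitively required aggregators; this assumes PostAggregator exposes getName() and getDependentFields(), and is not the method body from this patch:

    Set<String> needed = new HashSet<>(Arrays.asList(postAggName));
    LinkedList<PostAggregator> kept = new LinkedList<>();
    for (int i = postAggregatorList.size() - 1; i >= 0; i--) {
      PostAggregator agg = postAggregatorList.get(i);
      if (needed.contains(agg.getName())) {
        needed.addAll(agg.getDependentFields()); // pull in this aggregator's inputs
        kept.addFirst(agg);                      // preserves original order
      }
    }
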
diff --git a/processing/src/main/java/io/druid/query/aggregation/Histogram.java b/processing/src/main/java/io/druid/query/aggregation/Histogram.java
index b46c02bdc7d..61ace3710ae 100644
--- a/processing/src/main/java/io/druid/query/aggregation/Histogram.java
+++ b/processing/src/main/java/io/druid/query/aggregation/Histogram.java
@@ -136,6 +136,8 @@ public class Histogram
* Returns a visual representation of a histogram object.
* Initially returns an array of just the min. and max. values
* but can also support the addition of quantiles.
+ *
+ * @return a visual representation of this histogram
*/
public HistogramVisual asVisual() {
float[] visualCounts = new float[bins.length - 2];
diff --git a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java
index 8e84e690130..b93c7ace592 100644
--- a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java
+++ b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java
@@ -28,18 +28,20 @@ import java.nio.ByteBuffer;
/**
* Implements the HyperLogLog cardinality estimator described in:
- *
+ *
* http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf
- *
+ *
* Run this code to see a simple indication of expected errors based on different m values:
- *
- * for (int i = 1; i < 20; ++i) {
- *   System.out.printf("i[%,d], val[%,d] => error[%f%%]%n", i, 2 << i, 104 / Math.sqrt(2 << i));
- * }
- *
+ *
+ * <code>
+ * for (int i = 1; i &lt; 20; ++i) {
+ *   System.out.printf("i[%,d], val[%,d] =&gt; error[%f%%]%n", i, 2 &lt;&lt; i, 104 / Math.sqrt(2 &lt;&lt; i));
+ * }
+ * </code>
+ *
* This class is *not* multi-threaded. It can be passed among threads, but it is written with the assumption that
* only one thread is ever calling methods on it.
- *
+ *
* If you have multiple threads calling methods on this concurrently, I hope you manage to get correct behavior
*/
public abstract class HyperLogLogCollector implements Comparable<HyperLogLogCollector>
diff --git a/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java b/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java
index 6d4c2c78071..87e67e3f947 100644
--- a/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java
+++ b/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java
@@ -29,7 +29,7 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
/**
- * The ">" operator in a "having" clause. This is similar to SQL's "having aggregation > value",
+ * The ">" operator in a "having" clause. This is similar to SQL's "having aggregation > value",
* except that an aggregation in SQL is an expression instead of an aggregation name as in Druid.
*/
public class GreaterThanHavingSpec implements HavingSpec
diff --git a/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java b/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java
index 1f1a27bd9a7..61f4200e1ca 100644
--- a/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java
+++ b/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java
@@ -28,7 +28,7 @@ import java.nio.ByteBuffer;
import java.util.Arrays;
/**
- * The "<" operator in a "having" clause. This is similar to SQL's "having aggregation < value",
+ * The "<" operator in a "having" clause. This is similar to SQL's "having aggregation < value",
* except that an aggregation in SQL is an expression instead of an aggregation name as in Druid.
*/
public class LessThanHavingSpec implements HavingSpec
diff --git a/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java b/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java
index 8f78ffe6191..234545354cd 100644
--- a/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java
+++ b/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java
@@ -39,12 +39,12 @@ import java.util.Map;
/**
* A Builder for TopNQuery.
- *
+ *
* Required: dataSource(), intervals(), metric() and threshold() must be called before build()
* Additional requirement for numeric metric sorts: aggregators() must be called before build()
- *
+ *
* Optional: filters(), granularity(), postAggregators() and context() can be called before build()
- *
+ *
* Usage example:
*
* TopNQuery query = new TopNQueryBuilder()
diff --git a/processing/src/main/java/io/druid/segment/DimensionSelector.java b/processing/src/main/java/io/druid/segment/DimensionSelector.java
index 3922d345049..9627a4fef7d 100644
--- a/processing/src/main/java/io/druid/segment/DimensionSelector.java
+++ b/processing/src/main/java/io/druid/segment/DimensionSelector.java
@@ -42,7 +42,7 @@ public interface DimensionSelector
*
* Value cardinality would be 2.
*
- * @return
+ * @return the value cardinality
*/
public int getValueCardinality();
@@ -57,26 +57,26 @@ public interface DimensionSelector
*
* getRow() would return
*
- * getRow(0) => [0 1]
- * getRow(1) => [0]
- * getRow(2) => [0 1]
- * getRow(3) => [1]
+ * getRow(0) =&gt; [0 1]
+ * getRow(1) =&gt; [0]
+ * getRow(2) =&gt; [0 1]
+ * getRow(3) =&gt; [1]
*
* and then lookupName would return:
*
- * lookupName(0) => A
- * lookupName(1) => B
+ * lookupName(0) =&gt; A
+ * lookupName(1) =&gt; B
*
- * @param id
- * @return
+ * @param id id to lookup the field name for
+ * @return the field name for the given id
*/
public String lookupName(int id);
/**
* The ID is the int id value of the field.
*
- * @param name
- * @return
+ * @param name field name to look up the id for
+ * @return the id for the given field name
*/
public int lookupId(String name);
}
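
The id/name round trip implied by the examples above, as a sketch over any DimensionSelector (IndexedInts exposes size() and get() in this codebase):

    IndexedInts row = selector.getRow(); // e.g. [0 1] for a row with "A,B"
    for (int i = 0; i < row.size(); i++) {
      int id = row.get(i);
      assert selector.lookupId(selector.lookupName(id)) == id;
    }
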
diff --git a/processing/src/main/java/io/druid/segment/IndexMerger.java b/processing/src/main/java/io/druid/segment/IndexMerger.java
index 99f854da9af..fdccd4cdb22 100644
--- a/processing/src/main/java/io/druid/segment/IndexMerger.java
+++ b/processing/src/main/java/io/druid/segment/IndexMerger.java
@@ -114,7 +114,8 @@ public class IndexMerger
* @param dataInterval the Interval that the data represents
* @param outDir the directory to persist the data to
*
- * @throws java.io.IOException
+ * @return the index output directory
+ * @throws java.io.IOException if an IO error occurs persisting the index
*/
public static File persist(final IncrementalIndex index, final Interval dataInterval, File outDir) throws IOException
{
diff --git a/processing/src/main/java/io/druid/segment/QueryableIndex.java b/processing/src/main/java/io/druid/segment/QueryableIndex.java
index 7dc1dcbe5d4..7de331e80e3 100644
--- a/processing/src/main/java/io/druid/segment/QueryableIndex.java
+++ b/processing/src/main/java/io/druid/segment/QueryableIndex.java
@@ -33,7 +33,7 @@ public interface QueryableIndex extends ColumnSelector
/**
* The close method shouldn't actually be here as this is nasty. We will adjust it in the future.
- * @throws java.io.IOException
+ * @throws java.io.IOException if an exception was thrown closing the index
*/
@Deprecated
public void close() throws IOException;
diff --git a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
index 3bb29f16d53..95ec126c8bb 100644
--- a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
+++ b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
@@ -41,13 +41,13 @@ import java.util.Map;
/**
* A generic, flat storage mechanism. Use static methods fromArray() or fromIterable() to construct. If input
* is sorted, supports binary search index lookups. If input is not sorted, only supports array-like index lookups.
- *
+ *
* V1 Storage Format:
- *
+ *
* byte 1: version (0x1)
- * byte 2 == 0x1 => allowReverseLookup
- * bytes 3-6 => numBytesUsed
- * bytes 7-10 => numElements
+ * byte 2 == 0x1 =&gt; allowReverseLookup
+ * bytes 3-6 =&gt; numBytesUsed
+ * bytes 7-10 =&gt; numElements
* bytes 10-((numElements * 4) + 10): integers representing *end* offsets of byte serialized values
* bytes ((numElements * 4) + 10)-(numBytesUsed + 2): 4-byte integer representing length of value, followed by bytes for value
*/
@@ -327,8 +327,9 @@ public class GenericIndexed<T> implements Indexed<T>, Closeable
/**
* The returned GenericIndexed must be closed to release the underlying memory
- * @param maxBytes
- * @return
+ *
+ * @param maxBytes maximum size in bytes of the lookup cache
+ * @return a copy of this GenericIndexed with a lookup cache.
*/
public GenericIndexed withCache(int maxBytes)
{
diff --git a/processing/src/main/java/io/druid/segment/data/Indexed.java b/processing/src/main/java/io/druid/segment/data/Indexed.java
index f8def0481b7..fe05d0f6d3d 100644
--- a/processing/src/main/java/io/druid/segment/data/Indexed.java
+++ b/processing/src/main/java/io/druid/segment/data/Indexed.java
@@ -19,9 +19,6 @@
package io.druid.segment.data;
-/**
- * @param <T>
- */
public interface Indexed<T> extends Iterable<T>
{
Class<? extends T> getClazz();
diff --git a/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java b/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java
index 0f039ab0598..d9b2a8ab2c0 100644
--- a/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java
+++ b/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java
public interface ObjectStrategy<T> extends Comparator<T>
*
* @param buffer buffer to read value from
* @param numBytes number of bytes used to store the value, starting at buffer.position()
- * @return
+ * @return an object created from the given byte buffer representation
*/
public T fromByteBuffer(ByteBuffer buffer, int numBytes);
public byte[] toBytes(T val);
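
A strategy honoring that contract reads exactly numBytes starting at buffer.position(); a sketch for a String payload, assuming UTF-8:

    public String fromByteBuffer(ByteBuffer buffer, int numBytes)
    {
      byte[] bytes = new byte[numBytes];
      buffer.get(bytes); // consumes exactly numBytes from the current position
      return new String(bytes, java.nio.charset.StandardCharsets.UTF_8);
    }
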
diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
index 4f6f6a0f67f..487c9544f92 100644
--- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
+++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
public class IncrementalIndex implements Iterable<Row>
/**
* Adds a new row. The row might correspond with another row that already exists, in which case this will
* update that row instead of inserting a new one.
- *
+ *
*
* Calls to add() are thread safe.
*
diff --git a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java
index 2ac3b309f62..439adb5886a 100644
--- a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java
+++ b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java
@@ -40,6 +40,7 @@ public abstract class ComplexMetricSerde
* added to the builder.
*
* @param buffer the buffer to deserialize
+ * @param builder ColumnBuilder to add the column to
* @return a ColumnPartSerde that can serialize out the object that was read from the buffer to the builder
*/
public abstract ColumnPartSerde deserializeColumn(ByteBuffer buffer, ColumnBuilder builder);
diff --git a/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java b/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java
index c2e43e3917a..86657e6216a 100644
--- a/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java
+++ b/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java
@@ -49,14 +49,14 @@ import java.util.concurrent.LinkedBlockingQueue;
/**
* A FirehoseFactory for RabbitMQ.
- *
+ *
* It will receive its configuration through the realtime.spec file and expects to find a
* consumerProps element in the firehose definition with values for a number of configuration options.
* Below is a complete example for a RabbitMQ firehose configuration with some explanation. Options
* that have defaults can be skipped but options with no defaults must be specified with the exception
* of the URI property. If the URI property is set, it will override any other property that was also
* set.
- *
+ *
* File: realtime.spec
*
* "firehose" : {
@@ -88,7 +88,7 @@ import java.util.concurrent.LinkedBlockingQueue;
* }
* },
*
- *
+ *
* Limitations: This implementation will not attempt to reconnect to the MQ broker if the
* connection to it is lost. Furthermore it does not support any automatic failover on high availability
* RabbitMQ clusters. This is not supported by the underlying AMQP client library and while the behavior
@@ -96,7 +96,7 @@ import java.util.concurrent.LinkedBlockingQueue;
* the RabbitMQ cluster that sets the "ha-mode" and "ha-sync-mode" properly on the queue that this
* Firehose connects to, messages should survive an MQ broker node failure and be delivered once a
* connection to another node is set up.
- *
+ *
* For more information on RabbitMQ high availability please see:
* http://www.rabbitmq.com/ha.html.
*/
diff --git a/server/src/main/java/io/druid/curator/announcement/Announcer.java b/server/src/main/java/io/druid/curator/announcement/Announcer.java
index a0f396d4396..a9a8c3e3ffe 100644
--- a/server/src/main/java/io/druid/curator/announcement/Announcer.java
+++ b/server/src/main/java/io/druid/curator/announcement/Announcer.java
@@ -318,10 +318,10 @@ public class Announcer
/**
* Unannounces an announcement created at path. Note that if all announcements get removed, the Announcer
* will continue to have ZK watches on paths because clearing them out is a source of ugly race conditions.
- *
+ *
* If you need to completely clear all the state of what is being watched and announced, stop() the Announcer.
*
- * @param path
+ * @param path the path to unannounce
*/
public void unannounce(String path)
{
diff --git a/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java b/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java
index fdadea9292d..c84f086d25a 100644
--- a/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java
+++ b/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java
@@ -54,7 +54,7 @@ import java.util.concurrent.ThreadFactory;
/**
* The DiscoveryModule allows for the registration of Keys of DruidNode objects, which it intends to be
* automatically announced at the end of the lifecycle start.
- *
+ *
* In order for this to work a ServiceAnnouncer instance *must* be injected and instantiated first.
* This can often be achieved by registering ServiceAnnouncer.class with the LifecycleModule.
*/
@@ -64,10 +64,12 @@ public class DiscoveryModule implements Module
/**
* Requests that the un-annotated DruidNode instance be injected and published as part of the lifecycle.
- *
+ *
* That is, this module will announce the DruidNode instance returned by
* injector.getInstance(Key.get(DruidNode.class)) automatically.
* Announcement will happen in the LAST stage of the Lifecycle
+ *
+ * @param binder the Binder to register with
*/
public static void registerDefault(Binder binder)
{
@@ -76,7 +78,7 @@ public class DiscoveryModule implements Module
/**
* Requests that the annotated DruidNode instance be injected and published as part of the lifecycle.
- *
+ *
* That is, this module will announce the DruidNode instance returned by
* injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
* Announcement will happen in the LAST stage of the Lifecycle
@@ -90,11 +92,12 @@ public class DiscoveryModule implements Module
/**
* Requests that the annotated DruidNode instance be injected and published as part of the lifecycle.
- *
+ *
* That is, this module will announce the DruidNode instance returned by
* injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
* Announcement will happen in the LAST stage of the Lifecycle
*
+ * @param binder the Binder to register with
* @param annotation The annotation class to use in finding the DruidNode instance
*/
public static void register(Binder binder, Class<? extends Annotation> annotation)
@@ -104,11 +107,12 @@ public class DiscoveryModule implements Module
/**
* Requests that the keyed DruidNode instance be injected and published as part of the lifecycle.
- *
+ *
* That is, this module will announce the DruidNode instance returned by
* injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
* Announcement will happen in the LAST stage of the Lifecycle
*
+ * @param binder the Binder to register with
* @param key The key to use in finding the DruidNode instance
*/
public static void registerKey(Binder binder, Key<DruidNode> key)
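
Taken together, wiring one of these up from a Guice module is a one-liner; a minimal sketch using the registerDefault() variant documented above:

    public void configure(Binder binder)
    {
      DiscoveryModule.registerDefault(binder); // announces the un-annotated DruidNode as part of the lifecycle
    }
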
diff --git a/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java b/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
index dcff14024e3..d32bdd147f3 100644
--- a/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
+++ b/server/src/main/java/io/druid/curator/inventory/CuratorInventoryManager.java
@@ -44,10 +44,10 @@ import java.util.concurrent.atomic.AtomicReference;
/**
* An InventoryManager watches updates to inventory on Zookeeper (or some other discovery-like service publishing
* system). It is built up on two object types: containers and inventory objects.
- *
+ *
* The logic of the InventoryManager just maintains a local cache of the containers and inventory it sees on ZK. It
* provides methods for getting at the container objects, which house the actual individual pieces of inventory.
- *
+ *
* A Strategy is provided to the constructor of an Inventory manager, this strategy provides all of the
* object-specific logic to serialize, deserialize, compose and alter the container and inventory objects.
*/
diff --git a/server/src/main/java/io/druid/initialization/Initialization.java b/server/src/main/java/io/druid/initialization/Initialization.java
index 84feb11bbc3..9d88e9d9f75 100644
--- a/server/src/main/java/io/druid/initialization/Initialization.java
+++ b/server/src/main/java/io/druid/initialization/Initialization.java
@@ -102,7 +102,7 @@ public class Initialization
/**
* @param clazz Module class
- * @param <T>
+ * @param <T> Module type
*
* @return Returns the set of modules loaded.
*/
diff --git a/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java b/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
index c57c0575e91..263d820e8b4 100644
--- a/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
+++ b/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
@@ -40,10 +40,19 @@ import java.util.SortedSet;
})
public interface GranularitySpec
{
- /** Set of all time groups, broken up on segment boundaries. Should be sorted by interval start and non-overlapping.*/
- public Optional<SortedSet<Interval>> bucketIntervals();
+ /**
+ * Set of all time groups, broken up on segment boundaries. Should be sorted by interval start and non-overlapping.
+ *
+ * @return set of all time groups
+ */
+ public Optional<SortedSet<Interval>> bucketIntervals();
- /** Time-grouping interval corresponding to some instant, if any. */
+ /**
+ * Time-grouping interval corresponding to some instant, if any.
+ *
+ * @param dt instant to return time interval for
+ * @return optional time interval
+ */
public Optional<Interval> bucketInterval(DateTime dt);
public Granularity getSegmentGranularity();
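
Putting the two accessors together, bucketing a timestamp looks like this sketch (Guava's Optional, per this codebase):

    Optional<Interval> bucket = granularitySpec.bucketInterval(new DateTime("2014-01-01T00:00:00Z"));
    if (bucket.isPresent()) {
      Interval interval = bucket.get(); // the time group containing the instant
    }
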
diff --git a/server/src/main/java/io/druid/segment/realtime/FireDepartment.java b/server/src/main/java/io/druid/segment/realtime/FireDepartment.java
index 3172bcb6f44..9e410b4d35e 100644
--- a/server/src/main/java/io/druid/segment/realtime/FireDepartment.java
+++ b/server/src/main/java/io/druid/segment/realtime/FireDepartment.java
@@ -39,7 +39,7 @@ import java.io.IOException;
/**
* A Fire Department has a Firehose and a Plumber.
- *
+ *
* This is a metaphor for a realtime stream (Firehose) and a coordinator of sinks (Plumber). The Firehose provides the
* realtime stream of data. The Plumber directs each drop of water from the firehose into the correct sink and makes
* sure that the sinks don't overflow.
diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
index 98667948487..e48dbf7ac0f 100644
--- a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
+++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
@@ -663,6 +663,9 @@ public class RealtimePlumber implements Plumber
* Unannounces a given sink and removes all local references to it. It is important that this is only called
* from the single-threaded mergeExecutor, since otherwise chaos may ensue if merged segments are deleted while
* being created.
+ *
+ * @param truncatedTime sink key
+ * @param sink sink to unannounce
*/
protected void abandonSegment(final long truncatedTime, final Sink sink)
{
@@ -700,9 +703,9 @@ public class RealtimePlumber implements Plumber
/**
* Persists the given hydrant and returns the number of rows persisted
*
- * @param indexToPersist
- * @param schema
- * @param interval
+ * @param indexToPersist hydrant to persist
+ * @param schema datasource schema
+ * @param interval interval to persist
*
* @return the number of rows persisted
*/