From ac05836833d6d945d2446bd72a1443dd5d0d6a52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Xavier=20L=C3=A9aut=C3=A9?= Date: Fri, 29 Aug 2014 13:58:50 -0700 Subject: [PATCH] make Java 8 javadoc happy --- .../collections/OrderedMergeIterator.java | 6 +- .../collections/OrderedMergeSequence.java | 6 +- .../timeline/VersionedIntervalTimeline.java | 8 +-- .../examples/rand/RandomFirehoseFactory.java | 5 +- .../TwitterSpritzerFirehoseFactory.java | 11 +-- .../histogram/ApproximateHistogram.java | 72 +++++++++---------- .../druid/indexer/DeterminePartitionsJob.java | 4 +- .../io/druid/indexing/common/TaskStatus.java | 10 ++- .../indexing/common/task/AbstractTask.java | 5 +- .../indexing/common/task/HadoopIndexTask.java | 2 +- .../io/druid/indexing/common/task/Task.java | 8 ++- .../indexing/common/task/TaskResource.java | 4 +- .../overlord/IndexerDBCoordinator.java | 2 + .../indexing/overlord/RemoteTaskRunner.java | 6 +- .../druid/indexing/overlord/TaskLockbox.java | 7 ++ .../io/druid/indexing/overlord/TaskQueue.java | 5 +- .../druid/indexing/overlord/TaskRunner.java | 2 + .../druid/indexing/overlord/TaskStorage.java | 32 ++++++++- .../overlord/TaskStorageQueryAdapter.java | 3 + .../indexing/overlord/setup/EC2UserData.java | 3 + .../indexing/worker/WorkerTaskMonitor.java | 2 +- .../query/ChainedExecutionQueryRunner.java | 8 +-- .../src/main/java/io/druid/query/Druids.java | 44 ++++++------ .../io/druid/query/QuerySegmentWalker.java | 6 +- .../java/io/druid/query/QueryToolChest.java | 4 +- .../java/io/druid/query/TimewarpOperator.java | 2 +- .../query/aggregation/AggregatorFactory.java | 4 +- .../query/aggregation/AggregatorUtil.java | 2 + .../io/druid/query/aggregation/Histogram.java | 2 + .../hyperloglog/HyperLogLogCollector.java | 18 ++--- .../groupby/having/GreaterThanHavingSpec.java | 2 +- .../groupby/having/LessThanHavingSpec.java | 2 +- .../io/druid/query/topn/TopNQueryBuilder.java | 6 +- .../io/druid/segment/DimensionSelector.java | 22 +++--- .../java/io/druid/segment/IndexMerger.java | 3 +- .../java/io/druid/segment/QueryableIndex.java | 2 +- .../io/druid/segment/data/GenericIndexed.java | 15 ++-- .../java/io/druid/segment/data/Indexed.java | 3 - .../io/druid/segment/data/ObjectStrategy.java | 2 +- .../segment/incremental/IncrementalIndex.java | 2 +- .../segment/serde/ComplexMetricSerde.java | 1 + .../rabbitmq/RabbitMQFirehoseFactory.java | 8 +-- .../druid/curator/announcement/Announcer.java | 4 +- .../curator/discovery/DiscoveryModule.java | 14 ++-- .../inventory/CuratorInventoryManager.java | 4 +- .../druid/initialization/Initialization.java | 2 +- .../indexing/granularity/GranularitySpec.java | 15 +++- .../segment/realtime/FireDepartment.java | 2 +- .../realtime/plumber/RealtimePlumber.java | 9 ++- 49 files changed, 248 insertions(+), 163 deletions(-) diff --git a/common/src/main/java/io/druid/collections/OrderedMergeIterator.java b/common/src/main/java/io/druid/collections/OrderedMergeIterator.java index bdf5512e894..7cfc72adefc 100644 --- a/common/src/main/java/io/druid/collections/OrderedMergeIterator.java +++ b/common/src/main/java/io/druid/collections/OrderedMergeIterator.java @@ -34,13 +34,13 @@ import java.util.PriorityQueue; /** * An OrderedMergeIterator is an iterator that merges together multiple sorted iterators. It is written assuming * that the input Iterators are provided in order. That is, it places an extra restriction in the input iterators. - *

+ *
 * Normally a merge operation could operate with the actual input iterators in any order as long as the actual values
 * in the iterators are sorted. This requires that not only the individual values be sorted, but that the iterators
 * be provided in the order of the first element of each iterator.
- * <p/>

+ *
 * If this doesn't make sense, check out OrderedMergeIteratorTest.testScrewsUpOnOutOfOrderBeginningOfList()
- * <p/>

+ * * It places this extra restriction on the input data in order to implement an optimization that allows it to * remain as lazy as possible in the face of a common case where the iterators are just appended one after the other. */ diff --git a/common/src/main/java/io/druid/collections/OrderedMergeSequence.java b/common/src/main/java/io/druid/collections/OrderedMergeSequence.java index cfc29682d0d..ce8e0d93242 100644 --- a/common/src/main/java/io/druid/collections/OrderedMergeSequence.java +++ b/common/src/main/java/io/druid/collections/OrderedMergeSequence.java @@ -38,13 +38,13 @@ import java.util.PriorityQueue; /** * An OrderedMergeIterator is an iterator that merges together multiple sorted iterators. It is written assuming * that the input Iterators are provided in order. That is, it places an extra restriction in the input iterators. - *

+ *
 * Normally a merge operation could operate with the actual input iterators in any order as long as the actual values
 * in the iterators are sorted. This requires that not only the individual values be sorted, but that the iterators
 * be provided in the order of the first element of each iterator.
- * <p/>

+ *
 * If this doesn't make sense, check out OrderedMergeSequenceTest.testScrewsUpOnOutOfOrderBeginningOfList()
- * <p/>

+ * * It places this extra restriction on the input data in order to implement an optimization that allows it to * remain as lazy as possible in the face of a common case where the iterators are just appended one after the other. */ diff --git a/common/src/main/java/io/druid/timeline/VersionedIntervalTimeline.java b/common/src/main/java/io/druid/timeline/VersionedIntervalTimeline.java index 913ec8fffd9..903480e2dac 100644 --- a/common/src/main/java/io/druid/timeline/VersionedIntervalTimeline.java +++ b/common/src/main/java/io/druid/timeline/VersionedIntervalTimeline.java @@ -42,17 +42,17 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; /** * VersionedIntervalTimeline is a data structure that manages objects on a specific timeline. - *

+ *
 * It associates a jodatime Interval and a generically-typed version with the object that is being stored.
- * <p/>

+ *
 * In the event of overlapping timeline entries, timeline intervals may be chunked. The underlying data associated
 * with a timeline entry remains unchanged when chunking occurs.
- * <p/>

+ *
 * After loading objects via the add() method, the lookup(Interval) method can be used to get the list of the most
 * recent objects (according to the version) that match the given interval. The intent is that objects represent
 * a certain time period and when you do a lookup(), you are asking for all of the objects that you need to look
 * at in order to get a correct answer about that time period.
- * <p/>

+ * * The findOvershadowed() method returns a list of objects that will never be returned by a call to lookup() because * they are overshadowed by some other object. This can be used in conjunction with the add() and remove() methods * to achieve "atomic" updates. First add new items, then check if those items caused anything to be overshadowed, if diff --git a/examples/src/main/java/io/druid/examples/rand/RandomFirehoseFactory.java b/examples/src/main/java/io/druid/examples/rand/RandomFirehoseFactory.java index e395cf02dcb..4f9940a77a7 100644 --- a/examples/src/main/java/io/druid/examples/rand/RandomFirehoseFactory.java +++ b/examples/src/main/java/io/druid/examples/rand/RandomFirehoseFactory.java @@ -45,6 +45,7 @@ import static java.lang.Thread.sleep; * The generated tuples can be thought of as asynchronously * produced triples (timestamp, outColumn, target) where the timestamp varies depending on * speed of processing. + * *

* InputRows are produced as fast as requested, so this can be used to determine the * upper rate of ingest if sleepUsec is set to 0; nTokens specifies how many associated @@ -55,8 +56,8 @@ import static java.lang.Thread.sleep; * the moment an event is delivered.) * Values are offset by adding the modulus of the token number to the random number * so that token values have distinct, non-overlapping ranges. - *

*

+ *
 * Example spec file:
 * <pre>
  * [{
@@ -82,7 +83,7 @@ import static java.lang.Thread.sleep;
  * "basePersistDirectory" : "/tmp/realtime/basePersist" }
  * }]
 * </pre>
- * <p/>

+ *
 * Example query using POST to /druid/v2/ (where UTC date and time MUST include the current hour):
 * <pre>

  * {
diff --git a/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java b/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
index 6755ee0a933..cc222d8098b 100644
--- a/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
+++ b/examples/src/main/java/io/druid/examples/twitter/TwitterSpritzerFirehoseFactory.java
@@ -58,18 +58,11 @@ import static java.lang.Thread.sleep;
  * with timestamps along with ??.
  * The generated tuples have the form (timestamp, ????)
  * where the timestamp is from the twitter event.
- * <p/>
- * <p/>
- * <p/>
+ *
 * Example spec file:
- * <pre>
- * </pre>
- * <p/>
+ *
 * Example query using POST to /druid/v2/?w (where w is an arbitrary parameter and the date and time
 * is UTC):
- * <pre>
- * * * Notes on twitter.com HTTP (REST) API: v1.0 will be disabled around 2013-03 so v1.1 should be used; * twitter4j 3.0 (not yet released) will support the v1.1 api. diff --git a/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java b/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java index fc06968f870..b44fbd524e1 100644 --- a/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java +++ b/histogram/src/main/java/io/druid/query/aggregation/histogram/ApproximateHistogram.java @@ -253,9 +253,7 @@ public class ApproximateHistogram } /** - * Returns a string representation of the actual bin counts - * - * @return + * @return a string representation of the actual bin counts */ protected String getBinsString() { @@ -385,7 +383,7 @@ public class ApproximateHistogram /** * Merges the bin in the given position with the next bin * - * @param index index of the bin to merge, index must satisfy 0 <= index < binCount - 1 + * @param index index of the bin to merge, index must satisfy 0 <= index < binCount - 1 */ protected void merge(final int index) { @@ -506,9 +504,9 @@ public class ApproximateHistogram /** * Copies histogram h into the current histogram. * - * @param h + * @param h ApproximateHistogram to copy * - * @return + * @return this histogram */ public ApproximateHistogram copy(ApproximateHistogram h) { @@ -815,20 +813,20 @@ public class ApproximateHistogram /** * mergeBins performs the given number of bin merge operations on the given histogram - *

+ *
 * It repeatedly merges the two closest bins until it has performed the requested number of merge operations.
 * Merges are done in-place and unused bins have unknown state
- * <p/>

+ *
 * next / prev maintains a doubly-linked list of valid bin indices into the mergedBins array.
- * <p/>

+ *
 * Fast operation is achieved by building a min-heap of the deltas as opposed to repeatedly
 * scanning the array of deltas to find the minimum. A reverse index into the heap is maintained
 * to allow deleting and updating of specific deltas.
- * <p/>

+ *
 * next and prev arrays are used to maintain indices to the previous / next valid bin from a given bin index
- * <p/>

+ *
 * Its effect is equivalent to running the following code:
- * <p/>

+ *
 * <pre>

    *   ApproximateHistogram merged = new ApproximateHistogram(mergedBinCount, mergedPositions, mergedBins);
    *
@@ -1104,7 +1102,7 @@ public class ApproximateHistogram
   /**
    * Returns a byte-array representation of this ApproximateHistogram object
    *
-   * @return
+   * @return byte array representation
    */
   @JsonValue
   public byte[] toBytes()
@@ -1181,7 +1179,7 @@ public class ApproximateHistogram
   /**
    * Writes the representation of this ApproximateHistogram object to the given byte-buffer
    *
-   * @param buf
+   * @param buf ByteBuffer to write the ApproximateHistogram to
    */
   public void toBytes(ByteBuffer buf)
   {
@@ -1196,10 +1194,10 @@ public class ApproximateHistogram
 
   /**
    * Writes the dense representation of this ApproximateHistogram object to the given byte-buffer
-   * <p/>

+ * * Requires 16 + 12 * size bytes of storage * - * @param buf + * @param buf ByteBuffer to write the ApproximateHistogram to */ public void toBytesDense(ByteBuffer buf) { @@ -1217,10 +1215,10 @@ public class ApproximateHistogram /** * Writes the sparse representation of this ApproximateHistogram object to the given byte-buffer - *

+ * * Requires 16 + 12 * binCount bytes of storage * - * @param buf ByteBuffer to write object to + * @param buf ByteBuffer to write the ApproximateHistogram to */ public void toBytesSparse(ByteBuffer buf) { @@ -1239,10 +1237,10 @@ public class ApproximateHistogram /** * Returns a compact byte-buffer representation of this ApproximateHistogram object * storing actual values as opposed to histogram bins - *

- * Requires 3 + 4 * count bytes of storage with count <= 127 + * + * Requires 3 + 4 * count bytes of storage with count <= 127 * - * @param buf + * @param buf ByteBuffer to write the ApproximateHistogram to */ public void toBytesCompact(ByteBuffer buf) { @@ -1285,9 +1283,9 @@ public class ApproximateHistogram /** * Constructs an Approximate Histogram object from the given byte-array representation * - * @param bytes + * @param bytes byte array to construct an ApproximateHistogram from * - * @return + * @return ApproximateHistogram constructed from the given byte array */ public static ApproximateHistogram fromBytes(byte[] bytes) { @@ -1298,9 +1296,9 @@ public class ApproximateHistogram /** * Constructs an ApproximateHistogram object from the given dense byte-buffer representation * - * @param buf + * @param buf ByteBuffer to construct an ApproximateHistogram from * - * @return + * @return ApproximateHistogram constructed from the given ByteBuffer */ public static ApproximateHistogram fromBytesDense(ByteBuffer buf) { @@ -1324,9 +1322,9 @@ public class ApproximateHistogram /** * Constructs an ApproximateHistogram object from the given dense byte-buffer representation * - * @param buf + * @param buf ByteBuffer to construct an ApproximateHistogram from * - * @return + * @return ApproximateHistogram constructed from the given ByteBuffer */ public static ApproximateHistogram fromBytesSparse(ByteBuffer buf) { @@ -1352,9 +1350,9 @@ public class ApproximateHistogram /** * Constructs an ApproximateHistogram object from the given compact byte-buffer representation * - * @param buf + * @param buf ByteBuffer to construct an ApproximateHistogram from * - * @return + * @return ApproximateHistogram constructed from the given ByteBuffer */ public static ApproximateHistogram fromBytesCompact(ByteBuffer buf) { @@ -1428,9 +1426,9 @@ public class ApproximateHistogram /** * Constructs an ApproximateHistogram object from the given byte-buffer representation * - * @param buf + * @param buf ByteBuffer to construct an ApproximateHistogram from * - * @return + * @return ApproximateHistogram constructed from the given ByteBuffer */ public static ApproximateHistogram fromBytes(ByteBuffer buf) { @@ -1454,7 +1452,7 @@ public class ApproximateHistogram /** * Returns the approximate number of items less than or equal to b in the histogram * - * @param b + * @param b the cutoff * * @return the approximate number of items less than or equal to b */ @@ -1521,7 +1519,7 @@ public class ApproximateHistogram * probabilities = [.5f] returns [median] * probabilities = [.25f, .5f, .75f] returns the quartiles, [25%ile, median, 75%ile] * - * @param probabilities + * @param probabilities array of probabilities * * @return an array of length probabilities.length representing the the approximate sample quantiles * corresponding to the given probabilities @@ -1584,9 +1582,9 @@ public class ApproximateHistogram /** * Computes a visual representation of the approximate histogram with bins laid out according to the given breaks * - * @param breaks + * @param breaks breaks defining the histogram bins * - * @return + * @return visual representation of the histogram */ public Histogram toHistogram(final float[] breaks) { @@ -1607,7 +1605,7 @@ public class ApproximateHistogram * * @param size number of equal-sized bins to divide the histogram into * - * @return + * @return visual representation of the histogram */ public Histogram toHistogram(int size) { @@ -1629,7 +1627,7 @@ public class ApproximateHistogram * @param bucketSize the size of 
each bucket * @param offset the location of one breakpoint * - * @return + * @return visual representation of the histogram */ public Histogram toHistogram(final float bucketSize, final float offset) { diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java index 818dc9b3868..3012766f509 100644 --- a/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java +++ b/indexing-hadoop/src/main/java/io/druid/indexer/DeterminePartitionsJob.java @@ -81,13 +81,13 @@ import java.util.Map; /** * Determines appropriate ShardSpecs for a job by determining whether or not partitioning is necessary, and if so, * choosing the best dimension that satisfies the criteria: - *

+ * *

    *
  • Must have exactly one value per row.
  • *
  • Must not generate oversized partitions. A dimension with N rows having the same value will necessarily * put all those rows in the same partition, and that partition may be much larger than the target size.
  • *
- *

+ * * "Best" means a very high cardinality dimension, or, if none exist, the dimension that minimizes variation of * segment size relative to the target. */ diff --git a/indexing-service/src/main/java/io/druid/indexing/common/TaskStatus.java b/indexing-service/src/main/java/io/druid/indexing/common/TaskStatus.java index e45b37472ad..3ff47f8970c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/TaskStatus.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/TaskStatus.java @@ -28,7 +28,7 @@ import com.google.common.base.Preconditions; /** * Represents the status of a task from the perspective of the coordinator. The task may be ongoing * ({@link #isComplete()} false) or it may be complete ({@link #isComplete()} true). - *

+ * * TaskStatus objects are immutable. */ public class TaskStatus @@ -101,6 +101,8 @@ public class TaskStatus /** * Signals that a task is not yet complete, and is still runnable on a worker. Exactly one of isRunnable, * isSuccess, or isFailure will be true at any one time. + * + * @return whether the task is runnable. */ @JsonIgnore public boolean isRunnable() @@ -110,6 +112,8 @@ public class TaskStatus /** * Inverse of {@link #isRunnable}. + * + * @return whether the task is complete. */ @JsonIgnore public boolean isComplete() @@ -120,6 +124,8 @@ public class TaskStatus /** * Returned by tasks when they spawn subtasks. Exactly one of isRunnable, isSuccess, or isFailure will * be true at any one time. + * + * @return whether the task succeeded. */ @JsonIgnore public boolean isSuccess() @@ -130,6 +136,8 @@ public class TaskStatus /** * Returned by tasks when they complete unsuccessfully. Exactly one of isRunnable, isSuccess, or * isFailure will be true at any one time. + * + * @return whether the task failed */ @JsonIgnore public boolean isFailure() diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/AbstractTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/AbstractTask.java index 959bb548f43..0062458585c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/AbstractTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/AbstractTask.java @@ -124,7 +124,10 @@ public abstract class AbstractTask implements Task } /** - * Start helper methods * + * Start helper methods + * + * @param objects objects to join + * @return string of joined objects */ public static String joinId(Object... objects) { diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/HadoopIndexTask.java b/indexing-service/src/main/java/io/druid/indexing/common/task/HadoopIndexTask.java index 09f850eaeae..f5bccea9630 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/HadoopIndexTask.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/HadoopIndexTask.java @@ -85,7 +85,7 @@ public class HadoopIndexTask extends AbstractTask /** * @param spec is used by the HadoopDruidIndexerJob to set up the appropriate parameters * for creating Druid index segments. It may be modified. - *

+ * * Here, we will ensure that the DbConnectorConfig field of the spec is set to null, such that the * job does not push a list of published segments the database. Instead, we will use the method * IndexGeneratorJob.getPublishedSegments() to simply return a list of the published diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/Task.java b/indexing-service/src/main/java/io/druid/indexing/common/task/Task.java index f554d968b01..e5db129128c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/Task.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/Task.java @@ -59,23 +59,27 @@ public interface Task { /** * Returns ID of this task. Must be unique across all tasks ever created. + * @return task ID */ public String getId(); /** * Returns group ID of this task. Tasks with the same group ID can share locks. If tasks do not need to share locks, * a common convention is to set group ID equal to task ID. + * @return task group ID */ public String getGroupId(); /** * Returns a {@link io.druid.indexing.common.task.TaskResource} for this task. Task resources define specific * worker requirements a task may require. + * @return {@link io.druid.indexing.common.task.TaskResource} for this task */ public TaskResource getTaskResource(); /** * Returns a descriptive label for this task type. Used for metrics emission and logging. + * @return task type label */ public String getType(); @@ -95,6 +99,8 @@ public interface Task /** * Returns query runners for this task. If this task is not meant to answer queries over its datasource, this method * should return null. + * @param query result type + * @return query runners for this task */ public QueryRunner getQueryRunner(Query query); @@ -128,7 +134,7 @@ public interface Task * * @return Some kind of finished status (isRunnable must be false). * - * @throws Exception + * @throws Exception if this task failed */ public TaskStatus run(TaskToolbox toolbox) throws Exception; } diff --git a/indexing-service/src/main/java/io/druid/indexing/common/task/TaskResource.java b/indexing-service/src/main/java/io/druid/indexing/common/task/TaskResource.java index d5a57b40484..5e99508709e 100644 --- a/indexing-service/src/main/java/io/druid/indexing/common/task/TaskResource.java +++ b/indexing-service/src/main/java/io/druid/indexing/common/task/TaskResource.java @@ -43,6 +43,8 @@ public class TaskResource * Returns availability group ID of this task. Tasks the same availability group cannot be assigned to the same * worker. If tasks do not have this restriction, a common convention is to set the availability group ID to the * task ID. + * + * @return task availability group */ @JsonProperty public String getAvailabilityGroup() @@ -52,7 +54,7 @@ public class TaskResource /** - * Returns the number of worker slots this task will take. 
+ * @return the number of worker slots this task will take */ @JsonProperty public int getRequiredCapacity() diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/IndexerDBCoordinator.java b/indexing-service/src/main/java/io/druid/indexing/overlord/IndexerDBCoordinator.java index 986a5696d5b..283f47ab2c6 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/IndexerDBCoordinator.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/IndexerDBCoordinator.java @@ -140,6 +140,8 @@ public class IndexerDBCoordinator * * @param segments set of segments to add * @return set of segments actually added + * + * @throws java.io.IOException if a database error occurs */ public Set announceHistoricalSegments(final Set segments) throws IOException { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java index 89cc2ce9d73..a36e6381678 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/RemoteTaskRunner.java @@ -88,13 +88,13 @@ import java.util.concurrent.TimeUnit; * creating ephemeral nodes in ZK that workers must remove. Workers announce the statuses of the tasks they are running. * Once a task completes, it is up to the RTR to remove the task status and run any necessary cleanup. * The RemoteTaskRunner is event driven and updates state according to ephemeral node changes in ZK. - *

+ *
 * The RemoteTaskRunner will assign tasks to a node until the node hits capacity. At that point, task assignment will
 * fail. The RemoteTaskRunner depends on another component to create additional worker resources.
 * For example, {@link io.druid.indexing.overlord.scaling.ResourceManagementScheduler} can take care of these duties.
- * <p/>

+ *
 * If a worker node becomes inexplicably disconnected from Zk, the RemoteTaskRunner will fail any tasks associated with the worker.
- * <p/>

+ * * The RemoteTaskRunner uses ZK for job management and assignment and http for IPC messages. */ public class RemoteTaskRunner implements TaskRunner, TaskLogStreamer diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java index 7b3ffb09087..28506499f4c 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskLockbox.java @@ -161,6 +161,12 @@ public class TaskLockbox /** * Acquires a lock on behalf of a task. Blocks until the lock is acquired. Throws an exception if the lock * cannot be acquired. + * + * @param task task to acquire lock for + * @param interval interval to lock + * @return acquired TaskLock + * + * @throws java.lang.InterruptedException if the lock cannot be acquired */ public TaskLock lock(final Task task, final Interval interval) throws InterruptedException { @@ -296,6 +302,7 @@ public class TaskLockbox * Return the currently-active locks for some task. * * @param task task for which to locate locks + * @return currently-active locks for the given task */ public List findLocksForTask(final Task task) { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java index 29c265dc067..7bf500adcd3 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskQueue.java @@ -59,11 +59,11 @@ import java.util.concurrent.locks.ReentrantLock; /** * Interface between task producers and the task runner. - *

+ *
 * This object accepts tasks from producers using {@link #add} and manages delivery of these tasks to a
 * {@link TaskRunner}. Tasks will run in a mostly-FIFO order, with deviations when the natural next task is not ready
 * in time (based on its {@link Task#isReady} method).
- * <p/>

+ * * For persistence, we save all new tasks and task status changes using a {@link TaskStorage} object. */ public class TaskQueue @@ -295,6 +295,7 @@ public class TaskQueue * @param task task to add * * @return true + * @throws TaskExistsException if the task already exists */ public boolean add(final Task task) throws TaskExistsException { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunner.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunner.java index 0b4b5e3ff89..f380d6b5788 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunner.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskRunner.java @@ -42,6 +42,8 @@ public interface TaskRunner /** * Inform the task runner it can clean up any resources associated with a task. This implies shutdown of any * currently-running tasks. + * + * @param taskid task ID to clean up resources for */ public void shutdown(String taskid); diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorage.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorage.java index c783c582896..45ee3864143 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorage.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorage.java @@ -32,24 +32,33 @@ public interface TaskStorage /** * Adds a task to the storage facility with a particular status. * + * @param task task to add + * @param status task status * @throws io.druid.indexing.overlord.TaskExistsException if the task ID already exists */ public void insert(Task task, TaskStatus status) throws TaskExistsException; /** * Persists task status in the storage facility. This method should throw an exception if the task status lifecycle - * is not respected (absent -> RUNNING -> SUCCESS/FAILURE). + * is not respected (absent -> RUNNING -> SUCCESS/FAILURE). + * + * @param status task status */ public void setStatus(TaskStatus status); /** * Persists lock state in the storage facility. + * @param taskid task ID + * @param taskLock lock state */ public void addLock(String taskid, TaskLock taskLock); /** * Removes lock state from the storage facility. It is harmless to keep old locks in the storage facility, but * this method can help reclaim wasted space. + * + * @param taskid task ID + * @param taskLock lock state */ public void removeLock(String taskid, TaskLock taskLock); @@ -58,28 +67,44 @@ public interface TaskStorage * absentee Optional. * * NOTE: This method really feels like it should be combined with {@link #getStatus}. Expect that in the future. + * + * @param taskid task ID + * @return optional task */ public Optional getTask(String taskid); /** * Returns task status as stored in the storage facility. If the task ID does not exist, this will return * an absentee Optional. + * + * @param taskid task ID + * @return task status */ public Optional getStatus(String taskid); /** * Add an action taken by a task to the audit log. + * + * @param task task to record action for + * @param taskAction task action to record + * + * @param task action return type */ public void addAuditLog(Task task, TaskAction taskAction); /** * Returns all actions taken by a task. + * + * @param taskid task ID + * @return list of task actions */ public List getAuditLogs(String taskid); /** * Returns a list of currently running or pending tasks as stored in the storage facility. 
No particular order * is guaranteed, but implementations are encouraged to return tasks in ascending order of creation. + * + * @return list of active tasks */ public List getActiveTasks(); @@ -87,11 +112,16 @@ public interface TaskStorage * Returns a list of recently finished task statuses as stored in the storage facility. No particular order * is guaranteed, but implementations are encouraged to return tasks in descending order of creation. No particular * standard of "recent" is guaranteed, and in fact, this method is permitted to simply return nothing. + * + * @return list of recently finished tasks */ public List getRecentlyFinishedTaskStatuses(); /** * Returns a list of locks for a particular task. + * + * @param taskid task ID + * @return list of TaskLocks for the given task */ public List getLocks(String taskid); } diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorageQueryAdapter.java b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorageQueryAdapter.java index 67ea11dcf33..3bb8e4a9192 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorageQueryAdapter.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/TaskStorageQueryAdapter.java @@ -73,6 +73,9 @@ public class TaskStorageQueryAdapter * This method is useful when you want to figure out all of the things a single task spawned. It does pose issues * with the result set perhaps growing boundlessly and we do not do anything to protect against that. Use at your * own risk and know that at some point, we might adjust this to actually enforce some sort of limits. + * + * @param taskid task ID + * @return set of segments created by the specified task */ public Set getInsertedSegments(final String taskid) { diff --git a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EC2UserData.java b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EC2UserData.java index 3568bf45b7b..550aafc27e3 100644 --- a/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EC2UserData.java +++ b/indexing-service/src/main/java/io/druid/indexing/overlord/setup/EC2UserData.java @@ -35,6 +35,9 @@ public interface EC2UserData /** * Return a copy of this instance with a different worker version. If no changes are needed (possibly because the * user data does not depend on the worker version) then it is OK to return "this". + * + * @param version worker version + * @return instance with the specified version */ public EC2UserData withVersion(String version); diff --git a/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java b/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java index 3e31461cecf..5256c374a49 100644 --- a/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java +++ b/indexing-service/src/main/java/io/druid/indexing/worker/WorkerTaskMonitor.java @@ -41,7 +41,7 @@ import java.util.concurrent.ExecutorService; /** * The monitor watches ZK at a specified path for new tasks to appear. Upon starting the monitor, a listener will be * created that waits for new tasks. Tasks are executed as soon as they are seen. - *

+ * * The monitor implements {@link io.druid.query.QuerySegmentWalker} so tasks can offer up queryable data. This is useful for * realtime index tasks. */ diff --git a/processing/src/main/java/io/druid/query/ChainedExecutionQueryRunner.java b/processing/src/main/java/io/druid/query/ChainedExecutionQueryRunner.java index 8a5ed51a4df..77ca1b32fe1 100644 --- a/processing/src/main/java/io/druid/query/ChainedExecutionQueryRunner.java +++ b/processing/src/main/java/io/druid/query/ChainedExecutionQueryRunner.java @@ -47,14 +47,14 @@ import java.util.concurrent.TimeoutException; /** * A QueryRunner that combines a list of other QueryRunners and executes them in parallel on an executor. - *

+ * * When using this, it is important to make sure that the list of QueryRunners provided is fully flattened. * If, for example, you were to pass a list of a Chained QueryRunner (A) and a non-chained QueryRunner (B). Imagine * A has 2 QueryRunner chained together (Aa and Ab), the fact that the Queryables are run in parallel on an * executor would mean that the Queryables are actually processed in the order - *

- * A -> B -> Aa -> Ab - *

+ * + *

A -> B -> Aa -> Ab
+ * * That is, the two sub queryables for A would run *after* B is run, effectively meaning that the results for B * must be fully cached in memory before the results for Aa and Ab are computed. */ diff --git a/processing/src/main/java/io/druid/query/Druids.java b/processing/src/main/java/io/druid/query/Druids.java index ff03e8db7f7..fc475a2ba90 100644 --- a/processing/src/main/java/io/druid/query/Druids.java +++ b/processing/src/main/java/io/druid/query/Druids.java @@ -59,9 +59,9 @@ public class Druids /** * A Builder for AndDimFilter. - *

+ *
 * Required: fields() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   AndDimFilter andDimFilter = Druids.newAndDimFilterBuilder()
@@ -105,9 +105,9 @@ public class Druids
 
   /**
    * A Builder for OrDimFilter.
-   * <p/>

+ *
 * Required: fields() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   OrDimFilter orDimFilter = Druids.newOrDimFilterBuilder()
@@ -160,9 +160,9 @@ public class Druids
 
   /**
    * A Builder for NotDimFilter.
-   * <p/>

+ *
 * Required: field() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   NotDimFilter notDimFilter = Druids.newNotDimFilterBuilder()
@@ -206,9 +206,9 @@ public class Druids
 
   /**
    * A Builder for SelectorDimFilter.
-   * <p/>

+ *
 * Required: dimension() and value() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   Selector selDimFilter = Druids.newSelectorDimFilterBuilder()
@@ -285,10 +285,10 @@ public class Druids
 
   /**
    * A Builder for TimeseriesQuery.
-   * <p/>

+ *
 * Required: dataSource(), intervals(), and aggregators() must be called before build()
 * Optional: filters(), granularity(), postAggregators(), and context() can be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
@@ -483,11 +483,11 @@ public class Druids
 
   /**
    * A Builder for SearchQuery.
-   * <p/>

+ *
 * Required: dataSource(), intervals(), dimensions() and query() must be called before build()
- * <p/>

+ *
 * Optional: filters(), granularity(), and context() can be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   SearchQuery query = Druids.newSearchQueryBuilder()
@@ -678,9 +678,9 @@ public class Druids
 
   /**
    * A Builder for TimeBoundaryQuery.
-   * <p/>

+ *
 * Required: dataSource() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   TimeBoundaryQuery query = new MaxTimeQueryBuilder()
@@ -774,12 +774,12 @@ public class Druids
 
   /**
    * A Builder for Result.
-   * <p/>

+ *
 * Required: timestamp() and value() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


-   *   Result result = Druids.newResultBuilder()
+   *   Result<T> result = Druids.newResultBuilder()
    *                            .timestamp(egDateTime)
    *                            .value(egValue)
    *                            .build();
@@ -840,9 +840,9 @@ public class Druids
 
   /**
    * A Builder for SegmentMetadataQuery.
-   * <p/>

+ *
 * Required: dataSource(), intervals() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   SegmentMetadataQuery query = new SegmentMetadataQueryBuilder()
@@ -948,9 +948,9 @@ public class Druids
 
   /**
    * A Builder for SelectQuery.
-   * <p/>

+ *
 * Required: dataSource(), intervals() must be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


    *   SelectQuery query = new SelectQueryBuilder()
diff --git a/processing/src/main/java/io/druid/query/QuerySegmentWalker.java b/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
index 8e8c6b8c0fe..2ec96f52070 100644
--- a/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
+++ b/processing/src/main/java/io/druid/query/QuerySegmentWalker.java
@@ -29,6 +29,8 @@ public interface QuerySegmentWalker
    * Gets the Queryable for a given interval, the Queryable returned can be any version(s) or partitionNumber(s)
    * such that it represents the interval.
    *
+   * @param <T> query result type
+   * @param query the query to find a Queryable for
    * @param intervals the intervals to find a Queryable for
    * @return a Queryable object that represents the interval
    */
@@ -36,8 +38,10 @@ public interface QuerySegmentWalker
 
   /**
    * Gets the Queryable for a given list of SegmentSpecs.
-   * exist.
    *
+   * @param <T> the query result type
+   * @param query the query to return a Queryable for
+   * @param specs the list of SegmentSpecs to find a Queryable for
    * @return the Queryable object with the given SegmentSpecs
    */
  public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs);
diff --git a/processing/src/main/java/io/druid/query/QueryToolChest.java b/processing/src/main/java/io/druid/query/QueryToolChest.java
index d2722c622be..7d42eda6792 100644
--- a/processing/src/main/java/io/druid/query/QueryToolChest.java
+++ b/processing/src/main/java/io/druid/query/QueryToolChest.java
@@ -40,9 +40,9 @@ public abstract class QueryToolChest<ResultType, QueryType extends Query<ResultType>>
   public abstract Sequence<ResultType> mergeSequences(Sequence<Sequence<ResultType>> seqOfSequences);
 
diff --git a/processing/src/main/java/io/druid/query/TimewarpOperator.java b/processing/src/main/java/io/druid/query/TimewarpOperator.java
index 49a8fb1ed85..66df6e3cf55 100644
--- a/processing/src/main/java/io/druid/query/TimewarpOperator.java
+++ b/processing/src/main/java/io/druid/query/TimewarpOperator.java
@@ -134,7 +134,7 @@ public class TimewarpOperator implements PostProcessingOperator
   /**
    * Map time t into the last `period` ending within `dataInterval`
    *
-   * @param t
+   * @param t the current time to be mapped into `dataInterval`
    * @return the offset between the mapped time and time t
    */
   protected long computeOffset(final long t)
diff --git a/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java b/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
index ad355c71b6b..bbfa6ae3dab 100644
--- a/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
+++ b/processing/src/main/java/io/druid/query/aggregation/AggregatorFactory.java
@@ -26,9 +26,9 @@ import java.util.List;
 
 /**
  * Processing related interface
- * <p/>

+ *
 * An AggregatorFactory is an object that knows how to generate an Aggregator using a ColumnSelectorFactory.
- * <p/>

+ * * This is useful as an abstraction to allow Aggregator classes to be written in terms of MetricSelector objects * without making any assumptions about how they are pulling values out of the base data. That is, the data is * provided to the Aggregator through the MetricSelector object, so whatever creates that object gets to choose how diff --git a/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java b/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java index 61617c42dd5..0b07e2745e8 100644 --- a/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java +++ b/processing/src/main/java/io/druid/query/aggregation/AggregatorUtil.java @@ -37,6 +37,8 @@ public class AggregatorUtil * such that all the dependencies of any given aggregator should occur before that aggregator. * See AggregatorUtilTest.testOutOfOrderPruneDependentPostAgg for example. * @param postAggName name of the postAgg on which dependency is to be calculated + * + * @return the list of dependent postAggregators */ public static List pruneDependentPostAgg(List postAggregatorList, String postAggName) { diff --git a/processing/src/main/java/io/druid/query/aggregation/Histogram.java b/processing/src/main/java/io/druid/query/aggregation/Histogram.java index b46c02bdc7d..61ace3710ae 100644 --- a/processing/src/main/java/io/druid/query/aggregation/Histogram.java +++ b/processing/src/main/java/io/druid/query/aggregation/Histogram.java @@ -136,6 +136,8 @@ public class Histogram * Returns a visual representation of a histogram object. * Initially returns an array of just the min. and max. values * but can also support the addition of quantiles. + * + * @return a visual representation of this histogram */ public HistogramVisual asVisual() { float[] visualCounts = new float[bins.length - 2]; diff --git a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java index 8e84e690130..b93c7ace592 100644 --- a/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java +++ b/processing/src/main/java/io/druid/query/aggregation/hyperloglog/HyperLogLogCollector.java @@ -28,18 +28,20 @@ import java.nio.ByteBuffer; /** * Implements the HyperLogLog cardinality estimator described in: - *

+ *
 * http://algo.inria.fr/flajolet/Publications/FlFuGaMe07.pdf
- * <p/>

+ *
 * Run this code to see a simple indication of expected errors based on different m values:
- * <p/>

- * for (int i = 1; i < 20; ++i) {
- * System.out.printf("i[%,d], val[%,d] => error[%f%%]%n", i, 2 << i, 104 / Math.sqrt(2 << i));
- * }
- * <p/>

+ *
+ *
+ * for (int i = 1; i < 20; ++i) {
+ * System.out.printf("i[%,d], val[%,d] => error[%f%%]%n", i, 2 << i, 104 / Math.sqrt(2 << i));
+ * }
+ *
+ *
 * This class is *not* multi-threaded. It can be passed among threads, but it is written with the assumption that
 * only one thread is ever calling methods on it.
- * <p/>

+ * * If you have multiple threads calling methods on this concurrently, I hope you manage to get correct behavior */ public abstract class HyperLogLogCollector implements Comparable diff --git a/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java b/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java index 6d4c2c78071..87e67e3f947 100644 --- a/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java +++ b/processing/src/main/java/io/druid/query/groupby/having/GreaterThanHavingSpec.java @@ -29,7 +29,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; /** - * The ">" operator in a "having" clause. This is similar to SQL's "having aggregation > value", + * The ">" operator in a "having" clause. This is similar to SQL's "having aggregation > value", * except that an aggregation in SQL is an expression instead of an aggregation name as in Druid. */ public class GreaterThanHavingSpec implements HavingSpec diff --git a/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java b/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java index 1f1a27bd9a7..61f4200e1ca 100644 --- a/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java +++ b/processing/src/main/java/io/druid/query/groupby/having/LessThanHavingSpec.java @@ -28,7 +28,7 @@ import java.nio.ByteBuffer; import java.util.Arrays; /** - * The "<" operator in a "having" clause. This is similar to SQL's "having aggregation < value", + * The "<" operator in a "having" clause. This is similar to SQL's "having aggregation < value", * except that an aggregation in SQL is an expression instead of an aggregation name as in Druid. */ public class LessThanHavingSpec implements HavingSpec diff --git a/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java b/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java index 8f78ffe6191..234545354cd 100644 --- a/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java +++ b/processing/src/main/java/io/druid/query/topn/TopNQueryBuilder.java @@ -39,12 +39,12 @@ import java.util.Map; /** * A Builder for TopNQuery. - *

+ *
 * Required: dataSource(), intervals(), metric() and threshold() must be called before build()
 * Additional requirement for numeric metric sorts: aggregators() must be called before build()
- * <p/>

+ *
 * Optional: filters(), granularity(), postAggregators() and context() can be called before build()
- * <p/>

+ *
 * Usage example:
 * <pre>


  *   TopNQuery query = new TopNQueryBuilder()
diff --git a/processing/src/main/java/io/druid/segment/DimensionSelector.java b/processing/src/main/java/io/druid/segment/DimensionSelector.java
index 3922d345049..9627a4fef7d 100644
--- a/processing/src/main/java/io/druid/segment/DimensionSelector.java
+++ b/processing/src/main/java/io/druid/segment/DimensionSelector.java
@@ -42,7 +42,7 @@ public interface DimensionSelector
    *
    * Value cardinality would be 2.
    *
-   * @return
+   * @return the value cardinality
    */
   public int getValueCardinality();
 
@@ -57,26 +57,26 @@ public interface DimensionSelector
    *
    * getRow() would return
    *
-   * getRow(0) => [0 1]
-   * getRow(1) => [0]
-   * getRow(2) => [0 1]
-   * getRow(3) => [1]
+   * getRow(0) => [0 1]
+   * getRow(1) => [0]
+   * getRow(2) => [0 1]
+   * getRow(3) => [1]
    *
    * and then lookupName would return:
    *
-   * lookupName(0) => A
-   * lookupName(1) => B
+   * lookupName(0) => A
+   * lookupName(1) => B
    *
-   * @param id
-   * @return
+   * @param id id to lookup the field name for
+   * @return the field name for the given id
    */
   public String lookupName(int id);
 
   /**
    * The ID is the int id value of the field.
    *
-   * @param name
-   * @return
+   * @param name field name to look up the id for
+   * @return the id for the given field name
    */
   public int lookupId(String name);
 }
diff --git a/processing/src/main/java/io/druid/segment/IndexMerger.java b/processing/src/main/java/io/druid/segment/IndexMerger.java
index 99f854da9af..fdccd4cdb22 100644
--- a/processing/src/main/java/io/druid/segment/IndexMerger.java
+++ b/processing/src/main/java/io/druid/segment/IndexMerger.java
@@ -114,7 +114,8 @@ public class IndexMerger
    * @param dataInterval the Interval that the data represents
    * @param outDir       the directory to persist the data to
    *
-   * @throws java.io.IOException
+   * @return the index output directory
+   * @throws java.io.IOException if an IO error occurs persisting the index
    */
   public static File persist(final IncrementalIndex index, final Interval dataInterval, File outDir) throws IOException
   {
diff --git a/processing/src/main/java/io/druid/segment/QueryableIndex.java b/processing/src/main/java/io/druid/segment/QueryableIndex.java
index 7dc1dcbe5d4..7de331e80e3 100644
--- a/processing/src/main/java/io/druid/segment/QueryableIndex.java
+++ b/processing/src/main/java/io/druid/segment/QueryableIndex.java
@@ -33,7 +33,7 @@ public interface QueryableIndex extends ColumnSelector
 
   /**
    * The close method shouldn't actually be here as this is nasty. We will adjust it in the future.
-   * @throws java.io.IOException
+   * @throws java.io.IOException if an exception was thrown closing the index
    */
   @Deprecated
   public void close() throws IOException;
diff --git a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
index 3bb29f16d53..95ec126c8bb 100644
--- a/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
+++ b/processing/src/main/java/io/druid/segment/data/GenericIndexed.java
@@ -41,13 +41,13 @@ import java.util.Map;
 /**
  * A generic, flat storage mechanism.  Use static methods fromArray() or fromIterable() to construct.  If input
  * is sorted, supports binary search index lookups.  If input is not sorted, only supports array-like index lookups.
- * <p/>

+ *
 * V1 Storage Format:
- * <p/>

+ * * byte 1: version (0x1) - * byte 2 == 0x1 => allowReverseLookup - * bytes 3-6 => numBytesUsed - * bytes 7-10 => numElements + * byte 2 == 0x1 => allowReverseLookup + * bytes 3-6 => numBytesUsed + * bytes 7-10 => numElements * bytes 10-((numElements * 4) + 10): integers representing *end* offsets of byte serialized values * bytes ((numElements * 4) + 10)-(numBytesUsed + 2): 4-byte integer representing length of value, followed by bytes for value */ @@ -327,8 +327,9 @@ public class GenericIndexed implements Indexed, Closeable /** * The returned GenericIndexed must be closed to release the underlying memory - * @param maxBytes - * @return + * + * @param maxBytes maximum size in bytes of the lookup cache + * @return a copy of this GenericIndexed with a lookup cache. */ public GenericIndexed withCache(int maxBytes) { diff --git a/processing/src/main/java/io/druid/segment/data/Indexed.java b/processing/src/main/java/io/druid/segment/data/Indexed.java index f8def0481b7..fe05d0f6d3d 100644 --- a/processing/src/main/java/io/druid/segment/data/Indexed.java +++ b/processing/src/main/java/io/druid/segment/data/Indexed.java @@ -19,9 +19,6 @@ package io.druid.segment.data; -/** - * @param - */ public interface Indexed extends Iterable { Class getClazz(); diff --git a/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java b/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java index 0f039ab0598..d9b2a8ab2c0 100644 --- a/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java +++ b/processing/src/main/java/io/druid/segment/data/ObjectStrategy.java @@ -34,7 +34,7 @@ public interface ObjectStrategy extends Comparator * * @param buffer buffer to read value from * @param numBytes number of bytes used to store the value, starting at buffer.position() - * @return + * @return an object created from the given byte buffer representation */ public T fromByteBuffer(ByteBuffer buffer, int numBytes); public byte[] toBytes(T val); diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java index 4f6f6a0f67f..487c9544f92 100644 --- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java +++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java @@ -144,7 +144,7 @@ public class IncrementalIndex implements Iterable /** * Adds a new row. The row might correspond with another row that already exists, in which case this will * update that row instead of inserting a new one. - *

+ * * * Calls to add() are thread safe. * diff --git a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java index 2ac3b309f62..439adb5886a 100644 --- a/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java +++ b/processing/src/main/java/io/druid/segment/serde/ComplexMetricSerde.java @@ -40,6 +40,7 @@ public abstract class ComplexMetricSerde * added to the builder. * * @param buffer the buffer to deserialize + * @param builder ColumnBuilder to add the column to * @return a ColumnPartSerde that can serialize out the object that was read from the buffer to the builder */ public abstract ColumnPartSerde deserializeColumn(ByteBuffer buffer, ColumnBuilder builder); diff --git a/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java b/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java index c2e43e3917a..86657e6216a 100644 --- a/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java +++ b/rabbitmq/src/main/java/io/druid/firehose/rabbitmq/RabbitMQFirehoseFactory.java @@ -49,14 +49,14 @@ import java.util.concurrent.LinkedBlockingQueue; /** * A FirehoseFactory for RabbitMQ. - *

+ *
 * It will receive it's configuration through the realtime.spec file and expects to find a
 * consumerProps element in the firehose definition with values for a number of configuration options.
 * Below is a complete example for a RabbitMQ firehose configuration with some explanation. Options
 * that have defaults can be skipped but options with no defaults must be specified with the exception
 * of the URI property. If the URI property is set, it will override any other property that was also
 * set.
- * <p/>

+ *
 * File: realtime.spec
 * <pre>

  *   "firehose" : {
@@ -88,7 +88,7 @@ import java.util.concurrent.LinkedBlockingQueue;
  *     }
  *   },
 * </pre>
- * <p/>

+ * * Limitations: This implementation will not attempt to reconnect to the MQ broker if the * connection to it is lost. Furthermore it does not support any automatic failover on high availability * RabbitMQ clusters. This is not supported by the underlying AMQP client library and while the behavior @@ -96,7 +96,7 @@ import java.util.concurrent.LinkedBlockingQueue; * the RabbitMQ cluster that sets the "ha-mode" and "ha-sync-mode" properly on the queue that this * Firehose connects to, messages should survive an MQ broker node failure and be delivered once a * connection to another node is set up. - *

+ * * For more information on RabbitMQ high availability please see: * http://www.rabbitmq.com/ha.html. */ diff --git a/server/src/main/java/io/druid/curator/announcement/Announcer.java b/server/src/main/java/io/druid/curator/announcement/Announcer.java index a0f396d4396..a9a8c3e3ffe 100644 --- a/server/src/main/java/io/druid/curator/announcement/Announcer.java +++ b/server/src/main/java/io/druid/curator/announcement/Announcer.java @@ -318,10 +318,10 @@ public class Announcer /** * Unannounces an announcement created at path. Note that if all announcements get removed, the Announcer * will continue to have ZK watches on paths because clearing them out is a source of ugly race conditions. - *

+ * * If you need to completely clear all the state of what is being watched and announced, stop() the Announcer. * - * @param path + * @param path the path to unannounce */ public void unannounce(String path) { diff --git a/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java b/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java index fdadea9292d..c84f086d25a 100644 --- a/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java +++ b/server/src/main/java/io/druid/curator/discovery/DiscoveryModule.java @@ -54,7 +54,7 @@ import java.util.concurrent.ThreadFactory; /** * The DiscoveryModule allows for the registration of Keys of DruidNode objects, which it intends to be * automatically announced at the end of the lifecycle start. - *

+ *
 * In order for this to work, a ServiceAnnouncer instance *must* be injected and instantiated first.
 * This can often be achieved by registering ServiceAnnouncer.class with the LifecycleModule.
 */
@@ -64,10 +64,12 @@ public class DiscoveryModule implements Module
 /**
 * Requests that the un-annotated DruidNode instance be injected and published as part of the lifecycle.
- * <p/>
+ *
 * That is, this module will announce the DruidNode instance returned by
 * injector.getInstance(Key.get(DruidNode.class)) automatically.
 * Announcement will happen in the LAST stage of the Lifecycle
+ *
+ * @param binder the Binder to register with
 */
 public static void registerDefault(Binder binder)
 {
@@ -76,7 +78,7 @@ public class DiscoveryModule implements Module
 /**
 * Requests that the annotated DruidNode instance be injected and published as part of the lifecycle.
- * <p/>
+ *
 * That is, this module will announce the DruidNode instance returned by
 * injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
 * Announcement will happen in the LAST stage of the Lifecycle
@@ -90,11 +92,12 @@ public class DiscoveryModule implements Module
 /**
 * Requests that the annotated DruidNode instance be injected and published as part of the lifecycle.
- * <p/>
+ *
 * That is, this module will announce the DruidNode instance returned by
 * injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
 * Announcement will happen in the LAST stage of the Lifecycle
 *
+ * @param binder     the Binder to register with
 * @param annotation The annotation class to use in finding the DruidNode instance
 */
 public static void register(Binder binder, Class<? extends Annotation> annotation)
 {
@@ -104,11 +107,12 @@ public class DiscoveryModule implements Module
 /**
 * Requests that the keyed DruidNode instance be injected and published as part of the lifecycle.
- * <p/>
+ *
 * That is, this module will announce the DruidNode instance returned by
 * injector.getInstance(Key.get(DruidNode.class, annotation)) automatically.
 * Announcement will happen in the LAST stage of the Lifecycle
 *
+ * @param binder the Binder to register with
 * @param key    The key to use in finding the DruidNode instance
 */
 public static void registerKey(Binder binder, Key<DruidNode> key)
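
Taken together, a Guice module wiring a default node plus an annotated one might look like this sketch; the @Self annotation is only an example of a binding annotation:

    import com.google.inject.Binder;
    import com.google.inject.Module;
    import io.druid.guice.annotations.Self;

    public class ExampleDiscoveryWiring implements Module
    {
      @Override
      public void configure(Binder binder)
      {
        // announces injector.getInstance(Key.get(DruidNode.class))
        DiscoveryModule.registerDefault(binder);
        // announces injector.getInstance(Key.get(DruidNode.class, Self.class))
        DiscoveryModule.register(binder, Self.class);
      }
    }
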

+ *
 * The logic of the InventoryManager just maintains a local cache of the containers and inventory it sees on ZK. It
 * provides methods for getting at the container objects, which house the actual individual pieces of inventory.
- * <p/>
+ *
 * A Strategy is provided to the constructor of an InventoryManager; this strategy provides all of the
 * object-specific logic to serialize, deserialize, compose, and alter the container and inventory objects.
 */
diff --git a/server/src/main/java/io/druid/initialization/Initialization.java b/server/src/main/java/io/druid/initialization/Initialization.java
index 84feb11bbc3..9d88e9d9f75 100644
--- a/server/src/main/java/io/druid/initialization/Initialization.java
+++ b/server/src/main/java/io/druid/initialization/Initialization.java
@@ -102,7 +102,7 @@ public class Initialization
 /**
 * @param clazz Module class
- * @param <T>
+ * @param <T> Module type
 *
 * @return Returns the set of modules loaded.
 */
diff --git a/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java b/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
index c57c0575e91..263d820e8b4 100644
--- a/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
+++ b/server/src/main/java/io/druid/segment/indexing/granularity/GranularitySpec.java
@@ -40,10 +40,19 @@ import java.util.SortedSet;
 })
 public interface GranularitySpec
 {
-  /** Set of all time groups, broken up on segment boundaries. Should be sorted by interval start and non-overlapping.*/
-  public Optional<SortedSet<Interval>> bucketIntervals();
+  /**
+   * Set of all time groups, broken up on segment boundaries. Should be sorted by interval start and non-overlapping.
+   *
+   * @return set of all time groups
+   */
+  public Optional<SortedSet<Interval>> bucketIntervals();

-  /** Time-grouping interval corresponding to some instant, if any. */
+  /**
+   * Time-grouping interval corresponding to some instant, if any.
+   *
+   * @param dt instant to return time interval for
+   * @return optional time interval
+   */
   public Optional<Interval> bucketInterval(DateTime dt);

   public Granularity getSegmentGranularity();
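
A caller-side sketch of the Optional-returning contract documented above; Optional here is Guava's, and the GranularitySpec instance is assumed to come from an ingestion spec:

    import java.util.SortedSet;
    import com.google.common.base.Optional;
    import org.joda.time.DateTime;
    import org.joda.time.Interval;

    void printBuckets(GranularitySpec spec, DateTime dt)
    {
      Optional<SortedSet<Interval>> buckets = spec.bucketIntervals();
      if (buckets.isPresent()) {
        for (Interval bucket : buckets.get()) {
          System.out.println(bucket); // sorted by interval start, non-overlapping
        }
      }
      // the time group containing a particular instant, if any
      Optional<Interval> bucket = spec.bucketInterval(dt);
      System.out.println(bucket.isPresent() ? bucket.get() : "no bucket for " + dt);
    }
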

+ *
 * This is a metaphor for a realtime stream (Firehose) and a coordinator of sinks (Plumber). The Firehose provides the
 * realtime stream of data. The Plumber directs each drop of water from the firehose into the correct sink and makes
 * sure that the sinks don't overflow.
diff --git a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
index 98667948487..e48dbf7ac0f 100644
--- a/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
+++ b/server/src/main/java/io/druid/segment/realtime/plumber/RealtimePlumber.java
@@ -663,6 +663,9 @@ public class RealtimePlumber implements Plumber
 * Unannounces a given sink and removes all local references to it. It is important that this is only called
 * from the single-threaded mergeExecutor, since otherwise chaos may ensue if merged segments are deleted while
 * being created.
+ *
+ * @param truncatedTime sink key
+ * @param sink          sink to unannounce
 */
 protected void abandonSegment(final long truncatedTime, final Sink sink)
 {
@@ -700,9 +703,9 @@ public class RealtimePlumber implements Plumber
 /**
 * Persists the given hydrant and returns the number of rows persisted
 *
- * @param indexToPersist
- * @param schema
- * @param interval
+ * @param indexToPersist hydrant to persist
+ * @param schema         datasource schema
+ * @param interval       interval to persist
 *
 * @return the number of rows persisted
 */
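
To make the threading constraint on abandonSegment concrete, here is a sketch of the intended calling discipline; the executor field and the wrapper method are illustrative, not the class's actual members:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // a single-threaded executor serializes all merge and abandon work,
    // so a merge and a delete can never race against each other
    private final ExecutorService mergeExecutor = Executors.newSingleThreadExecutor();

    void scheduleAbandon(final long truncatedTime, final Sink sink)
    {
      mergeExecutor.execute(
          new Runnable()
          {
            @Override
            public void run()
            {
              // safe: no merge for this sink can be in flight on another thread
              abandonSegment(truncatedTime, sink);
            }
          }
      );
    }
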