diff --git a/benchmarks/src/main/java/io/druid/benchmark/datagen/EnumeratedTreeDistribution.java b/benchmarks/src/main/java/io/druid/benchmark/datagen/EnumeratedTreeDistribution.java
index 5cab5af7ee7..d8ff5c9d136 100644
--- a/benchmarks/src/main/java/io/druid/benchmark/datagen/EnumeratedTreeDistribution.java
+++ b/benchmarks/src/main/java/io/druid/benchmark/datagen/EnumeratedTreeDistribution.java
@@ -25,7 +25,7 @@ import org.apache.commons.math3.util.Pair;
import java.util.List;
import java.util.TreeMap;
-/*
+/**
* EnumeratedDistribution's sample() method does a linear scan through the array of probabilities.
*
* This is too slow with high-cardinality value sets, so this subclass overrides sample() to use
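
The TreeMap turns sampling into an inverse-CDF lookup: keys are running cumulative probabilities, and floorEntry() finds the matching bucket in O(log n) rather than O(n). A minimal sketch of that idea under assumed names (TreeSampler is illustrative, not the Druid class):

    import java.util.List;
    import java.util.Random;
    import java.util.TreeMap;

    import org.apache.commons.math3.util.Pair;

    class TreeSampler<T>
    {
      private final TreeMap<Double, T> cumulative = new TreeMap<>();
      private final Random random = new Random();

      TreeSampler(List<Pair<T, Double>> pmf)
      {
        double sum = 0.0;
        for (Pair<T, Double> entry : pmf) {
          // Key each value by the lower bound of its probability interval [sum, sum + p).
          cumulative.put(sum, entry.getFirst());
          sum += entry.getSecond();
        }
      }

      T sample()
      {
        // nextDouble() is in [0, 1); floorEntry() locates its interval in O(log n).
        return cumulative.floorEntry(random.nextDouble()).getValue();
      }
    }
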
diff --git a/benchmarks/src/main/java/io/druid/benchmark/datagen/RealRoundingDistribution.java b/benchmarks/src/main/java/io/druid/benchmark/datagen/RealRoundingDistribution.java
index e913f3a8a0f..04c78a486c6 100644
--- a/benchmarks/src/main/java/io/druid/benchmark/datagen/RealRoundingDistribution.java
+++ b/benchmarks/src/main/java/io/druid/benchmark/datagen/RealRoundingDistribution.java
@@ -22,7 +22,7 @@ package io.druid.benchmark.datagen;
import org.apache.commons.math3.distribution.AbstractIntegerDistribution;
import org.apache.commons.math3.distribution.AbstractRealDistribution;
-/*
+/**
* Rounds the output values from the sample() function of an AbstractRealDistribution.
*/
public class RealRoundingDistribution extends AbstractIntegerDistribution
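
The wrapper's job is simply to draw from the underlying real distribution and round to the nearest integer; a hedged usage sketch (the NormalDistribution parameters are illustrative):

    import org.apache.commons.math3.distribution.NormalDistribution;

    public class RoundingExample
    {
      public static void main(String[] args)
      {
        NormalDistribution real = new NormalDistribution(100.0, 15.0); // mean, standard deviation
        int rounded = (int) Math.round(real.sample()); // round each real-valued draw to an int
        System.out.println(rounded);
      }
    }
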
diff --git a/codestyle/checkstyle.xml b/codestyle/checkstyle.xml
index cd8ad3c15de..860df828adc 100644
--- a/codestyle/checkstyle.xml
+++ b/codestyle/checkstyle.xml
@@ -53,5 +53,10 @@
+
+
+
+
+
diff --git a/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java b/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
index fbb878efbc1..0256a8835aa 100644
--- a/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
+++ b/indexing-hadoop/src/main/java/io/druid/indexer/DetermineHashedPartitionsJob.java
@@ -82,7 +82,7 @@ public class DetermineHashedPartitionsJob implements Jobby
* Group by (timestamp, dimensions) so we can correctly count dimension values as they would appear
* in the final segment.
*/
- long startTime = System.currentTimeMillis();
+ final long startTime = System.currentTimeMillis();
final Job groupByJob = Job.getInstance(
new Configuration(),
String.format("%s-determine_partitions_hashed-%s", config.getDataSource(), config.getIntervals())
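
The comment's point is that rollup collapses rows sharing a (truncated timestamp, dimensions) key, so cardinality has to be counted over grouped keys rather than raw input rows; a hedged illustration (types and names invented):

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class RollupCardinalityExample
    {
      // Count distinct (truncated timestamp, dimensions) keys: this is the number
      // of rows the final rolled-up segment would actually contain.
      static long countRolledUpRows(Iterable<List<Object>> groupKeys)
      {
        Set<List<Object>> distinct = new HashSet<>();
        for (List<Object> key : groupKeys) {
          distinct.add(key);
        }
        return distinct.size();
      }
    }
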
diff --git a/integration-tests/src/main/java/org/testng/TestNG.java b/integration-tests/src/main/java/org/testng/TestNG.java
index 6e7fc4ac798..d990e14c229 100644
--- a/integration-tests/src/main/java/org/testng/TestNG.java
+++ b/integration-tests/src/main/java/org/testng/TestNG.java
@@ -2211,7 +2211,7 @@ public class TestNG
private URLClassLoader m_serviceLoaderClassLoader;
private List<ITestNGListener> m_serviceLoaderListeners = Lists.newArrayList();
- /*
+ /**
* Used to test ServiceClassLoader
*/
public void setServiceLoaderClassLoader(URLClassLoader ucl)
@@ -2219,7 +2219,7 @@ public class TestNG
m_serviceLoaderClassLoader = ucl;
}
- /*
+ /**
* Used to test ServiceClassLoader
*/
private void addServiceLoaderListener(ITestNGListener l)
@@ -2227,7 +2227,7 @@ public class TestNG
m_serviceLoaderListeners.add(l);
}
- /*
+ /**
* Used to test ServiceClassLoader
*/
public List<ITestNGListener> getServiceLoaderListeners()
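
Together these members let a test point TestNG's service-loader discovery at an isolated classpath and then inspect which listeners were found. A hedged usage sketch (the jar path is hypothetical):

    import java.io.File;
    import java.net.URL;
    import java.net.URLClassLoader;

    import org.testng.TestNG;

    public class ServiceLoaderExample
    {
      public static void main(String[] args) throws Exception
      {
        TestNG testng = new TestNG();
        // Hypothetical jar carrying META-INF/services entries for TestNG listeners.
        URL listenersJar = new File("build/test-listeners.jar").toURI().toURL();
        testng.setServiceLoaderClassLoader(new URLClassLoader(new URL[]{listenersJar}));
      }
    }
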
diff --git a/processing/src/main/java/io/druid/query/aggregation/BufferAggregator.java b/processing/src/main/java/io/druid/query/aggregation/BufferAggregator.java
index 1951a6df4ae..c8e957605d8 100644
--- a/processing/src/main/java/io/druid/query/aggregation/BufferAggregator.java
+++ b/processing/src/main/java/io/druid/query/aggregation/BufferAggregator.java
@@ -127,7 +127,7 @@ public interface BufferAggregator extends HotLoopCallee
{
}
- /*
+ /**
* Relocates any cached objects.
* If the underlying ByteBuffer used for the aggregation buffer relocates to a new ByteBuffer, positional caches
* (if any) built on top of the old ByteBuffer cannot be used for further {@link BufferAggregator#aggregate(ByteBuffer, int)}
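
An aggregator that memoizes per-position state would honor this by re-keying that state on relocation. A hedged sketch, assuming an (oldPosition, newPosition, oldBuffer, newBuffer) parameter list (the full signature is not shown in this hunk) and a hypothetical position-keyed cache:

    import java.nio.ByteBuffer;
    import java.util.HashMap;
    import java.util.Map;

    class PositionalCacheExample
    {
      // Hypothetical aggregator state: cached objects keyed by buffer position.
      private final Map<Integer, Object> positionalCache = new HashMap<>();

      public void relocate(int oldPosition, int newPosition, ByteBuffer oldBuffer, ByteBuffer newBuffer)
      {
        // An entry built against the old buffer is only valid at its old position;
        // re-key it so later aggregate() calls find it at the new position.
        Object cached = positionalCache.remove(oldPosition);
        if (cached != null) {
          positionalCache.put(newPosition, cached);
        }
      }
    }
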
diff --git a/processing/src/main/java/io/druid/query/groupby/epinephelinae/Groupers.java b/processing/src/main/java/io/druid/query/groupby/epinephelinae/Groupers.java
index 2990a07548e..92400427cfd 100644
--- a/processing/src/main/java/io/druid/query/groupby/epinephelinae/Groupers.java
+++ b/processing/src/main/java/io/druid/query/groupby/epinephelinae/Groupers.java
@@ -34,7 +34,7 @@ public class Groupers
private static final int C1 = 0xcc9e2d51;
private static final int C2 = 0x1b873593;
- /*
+ /**
* This method was rewritten in Java from an intermediate step of the Murmur hash function in
* https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp, which contained the
* following header:
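
With the constants above, that intermediate MurmurHash3 step is a multiply, a 15-bit left rotation, and a second multiply; a hedged sketch of such a bit-mixing helper (the name smear is assumed):

    // One MurmurHash3 mixing round: k *= C1; k = rotl32(k, 15); k *= C2.
    static int smear(int hashCode)
    {
      return C2 * Integer.rotateLeft(hashCode * C1, 15);
    }
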
diff --git a/processing/src/main/java/io/druid/query/metadata/metadata/SegmentMetadataQuery.java b/processing/src/main/java/io/druid/query/metadata/metadata/SegmentMetadataQuery.java
index 130025c6077..89ab573cce5 100644
--- a/processing/src/main/java/io/druid/query/metadata/metadata/SegmentMetadataQuery.java
+++ b/processing/src/main/java/io/druid/query/metadata/metadata/SegmentMetadataQuery.java
@@ -45,7 +45,8 @@ import java.util.Objects;
public class SegmentMetadataQuery extends BaseQuery<SegmentAnalysis>
{
- /* The SegmentMetadataQuery cache key may contain UTF-8 column name strings.
+ /**
+ * The SegmentMetadataQuery cache key may contain UTF-8 column name strings.
* Prepend 0xFF before the analysisTypes as a separator to avoid
* any potential confusion with string values.
*/
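
0xFF can never occur in well-formed UTF-8, which is what makes it an unambiguous delimiter between the column-name bytes and the analysis-type bytes. A hedged sketch of the layout (names and the analysis-type encoding are illustrative):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    class CacheKeyExample
    {
      static ByteBuffer cacheKey(String columnName, byte[] analysisTypesBytes)
      {
        byte[] columnBytes = columnName.getBytes(StandardCharsets.UTF_8);
        return ByteBuffer.allocate(columnBytes.length + 1 + analysisTypesBytes.length)
            .put(columnBytes)
            .put((byte) 0xFF) // separator: 0xFF is never a valid byte in UTF-8 text
            .put(analysisTypesBytes);
      }
    }
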
diff --git a/processing/src/main/java/io/druid/segment/data/CompressionFactory.java b/processing/src/main/java/io/druid/segment/data/CompressionFactory.java
index 09c4da117e2..94b2335762f 100644
--- a/processing/src/main/java/io/druid/segment/data/CompressionFactory.java
+++ b/processing/src/main/java/io/druid/segment/data/CompressionFactory.java
@@ -63,7 +63,7 @@ public class CompressionFactory
// encoding format for segments created prior to the introduction of encoding formats
public static final LongEncodingFormat LEGACY_LONG_ENCODING_FORMAT = LongEncodingFormat.LONGS;
- /*
+ /**
* Delta Encoding Header v1:
* Byte 1 : version
* Byte 2 - 9 : base value
@@ -71,7 +71,7 @@ public class CompressionFactory
*/
public static final byte DELTA_ENCODING_VERSION = 0x1;
- /*
+ /**
* Table Encoding Header v1:
* Byte 1 : version
* Byte 2 - 5 : table size
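
Both headers are small fixed-layout prefixes; a hedged sketch of writing the delta header described above (fields beyond byte 9 are truncated in this hunk and omitted here):

    import java.nio.ByteBuffer;

    class DeltaHeaderExample
    {
      static ByteBuffer writeDeltaHeader(long baseValue)
      {
        return ByteBuffer.allocate(1 + Long.BYTES)
            .put((byte) 0x1)     // byte 1: version (DELTA_ENCODING_VERSION)
            .putLong(baseValue); // bytes 2-9: base value
      }
    }
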
@@ -112,7 +112,7 @@ public class CompressionFactory
return hasEncodingFlag(strategyId) ? (byte) (strategyId + FLAG_VALUE) : strategyId;
}
- /*
+ /**
* The compression and decompression of encodings are separated into different enums. EncodingStrategy refers to the
* strategy used to encode the data, and EncodingFormat refers to the format the data is encoded in. Note there is not
* necessarily a one-to-one mapping between the two. For instance, the AUTO LongEncodingStrategy scans the data once
@@ -120,7 +120,6 @@ public class CompressionFactory
* write in any of the LongEncodingFormats. On the other hand, there is no LongEncodingStrategy that always writes in
* the TABLE LongEncodingFormat, since that format only works for data with low cardinality.
*/
-
public enum LongEncodingStrategy
{
/**
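
In that split, an AUTO-style strategy is free to pick whichever format fits the data it has scanned; a hedged illustration (the cardinality threshold is invented):

    // Hypothetical sketch of the strategy/format split: the strategy inspects
    // the data, then delegates to a concrete LongEncodingFormat.
    static LongEncodingFormat pickFormat(long distinctValues)
    {
      // TABLE only works for low-cardinality data (threshold invented here);
      // otherwise fall back to the plain LONGS format.
      return distinctValues <= 256 ? LongEncodingFormat.TABLE : LongEncodingFormat.LONGS;
    }
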
diff --git a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
index 1daa24ad61e..75e64b1f2f8 100644
--- a/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
+++ b/processing/src/main/java/io/druid/segment/incremental/IncrementalIndex.java
@@ -595,7 +595,7 @@ public abstract class IncrementalIndex<AggregatorType> implements Iterable<Row>,
return capabilities;
}
- /*
+ /**
* Currently called to initialize IncrementalIndex dimension order during index creation.
* Index dimension ordering could be changed to initialize from DimensionsSpec after resolution of
* https://github.com/druid-io/druid/issues/2011