Move opening braces on a new line to the end of the previous line (elastic/elasticsearch#473)

Original commit: elastic/x-pack-elasticsearch@57aedab104
David Kyle 2016-12-06 13:05:15 +00:00 committed by GitHub
parent 570cde7a6a
commit 36d6141885
32 changed files with 186 additions and 370 deletions
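In short: every opening brace that previously sat on its own line (Allman style) moves to the end of the preceding declaration or statement (K&R style, the convention used across Elasticsearch). A minimal before/after sketch of the change, using a hypothetical class rather than code from this commit:

// Before: Allman style, opening brace on its own line
public class Example
{
    private Example()
    {
    }
}

// After: K&R style, opening brace ends the previous line
public class Example {
    private Example() {
    }
}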


@@ -12,21 +12,18 @@ import org.elasticsearch.common.ParseField;
* The serialised models can get very large and only the C++ code
* understands how to decode them, hence there is no reason to load
* them into the Java process.
*
* However, the Java process DOES set up a mapping on the Elasticsearch
* index to tell Elasticsearch not to analyse the model state documents
* in any way. (Otherwise Elasticsearch would go into a spin trying to
* make sense of such large JSON documents.)
*/
-public class ModelState
-{
+public class ModelState {
/**
* The type of this class used when persisting the data
*/
public static final ParseField TYPE = new ParseField("model_state");
-private ModelState()
-{
+private ModelState() {
}
}


@@ -10,8 +10,7 @@ import java.time.Duration;
/**
* Factory methods for a sensible default for the scheduler frequency
*/
-public final class DefaultFrequency
-{
+public final class DefaultFrequency {
private static final int SECONDS_IN_MINUTE = 60;
private static final int TWO_MINS_SECONDS = 2 * SECONDS_IN_MINUTE;
private static final int TWENTY_MINS_SECONDS = 20 * SECONDS_IN_MINUTE;
@@ -19,42 +18,36 @@ public final class DefaultFrequency
private static final Duration TEN_MINUTES = Duration.ofMinutes(10);
private static final Duration ONE_HOUR = Duration.ofHours(1);
-private DefaultFrequency()
-{
+private DefaultFrequency() {
// Do nothing
}
/**
* Creates a sensible default frequency for a given bucket span.
*
* <p>
* The default depends on the bucket span:
* <ul>
- * <li> &lt;= 2 mins -&gt; 1 min</li>
- * <li> &lt;= 20 mins -&gt; bucket span / 2</li>
- * <li> &lt;= 12 hours -&gt; 10 mins</li>
- * <li> &gt; 12 hours -&gt; 1 hour</li>
+ * <li> &lt;= 2 mins -&gt; 1 min</li>
+ * <li> &lt;= 20 mins -&gt; bucket span / 2</li>
+ * <li> &lt;= 12 hours -&gt; 10 mins</li>
+ * <li> &gt; 12 hours -&gt; 1 hour</li>
* </ul>
*
* @param bucketSpanSeconds the bucket span in seconds
* @return the default frequency
*/
-public static Duration ofBucketSpan(long bucketSpanSeconds)
-{
-if (bucketSpanSeconds <= 0)
-{
+public static Duration ofBucketSpan(long bucketSpanSeconds) {
+if (bucketSpanSeconds <= 0) {
throw new IllegalArgumentException("Bucket span has to be > 0");
}
-if (bucketSpanSeconds <= TWO_MINS_SECONDS)
-{
+if (bucketSpanSeconds <= TWO_MINS_SECONDS) {
return Duration.ofSeconds(SECONDS_IN_MINUTE);
}
-if (bucketSpanSeconds <= TWENTY_MINS_SECONDS)
-{
+if (bucketSpanSeconds <= TWENTY_MINS_SECONDS) {
return Duration.ofSeconds(bucketSpanSeconds / 2);
}
-if (bucketSpanSeconds <= HALF_DAY_SECONDS)
-{
+if (bucketSpanSeconds <= HALF_DAY_SECONDS) {
return TEN_MINUTES;
}
return ONE_HOUR;
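For a quick sanity check of the rules listed in the javadoc above, here is a hedged usage sketch (hypothetical calls, not part of this commit; it assumes the public API exactly as shown in this file):

// Each value follows the bucket-span table in the javadoc above
Duration d1 = DefaultFrequency.ofBucketSpan(60);    // <= 2 mins   -> 1 min
Duration d2 = DefaultFrequency.ofBucketSpan(600);   // <= 20 mins  -> bucket span / 2 = 5 mins
Duration d3 = DefaultFrequency.ofBucketSpan(3600);  // <= 12 hours -> 10 mins
Duration d4 = DefaultFrequency.ofBucketSpan(86400); // > 12 hours  -> 1 hour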


@@ -7,8 +7,7 @@ package org.elasticsearch.xpack.prelert.job.detectionrules;
import java.util.Locale;
-public enum RuleAction
-{
+public enum RuleAction {
FILTER_RESULTS;
/**
@@ -17,8 +16,7 @@ public enum RuleAction
* @param value String representation
* @return The rule action
*/
-public static RuleAction forString(String value)
-{
+public static RuleAction forString(String value) {
return RuleAction.valueOf(value.toUpperCase(Locale.ROOT));
}
}


@@ -11,8 +11,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Optional;
-public interface DataExtractor
-{
+public interface DataExtractor {
/**
* Set-up the extractor for a new search
*


@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.prelert.job.extraction;
import org.elasticsearch.xpack.prelert.job.Job;
-public interface DataExtractorFactory
-{
+public interface DataExtractorFactory {
DataExtractor newExtractor(Job job);
}


@@ -12,8 +12,7 @@ import java.util.ResourceBundle;
/**
* Defines the keys for all the message strings
*/
-public final class Messages
-{
+public final class Messages {
/**
* The base name of the bundle without the .properties extension
* or locale
@@ -266,12 +265,10 @@ public final class Messages
public static final String SUPPORT_BUNDLE_SCRIPT_ERROR = "support.bundle.script.error";
-private Messages()
-{
+private Messages() {
}
-public static ResourceBundle load()
-{
+public static ResourceBundle load() {
return ResourceBundle.getBundle(Messages.BUNDLE_NAME, Locale.getDefault());
}
@@ -280,8 +277,7 @@ public final class Messages
*
* @param key Must be one of the statics defined in this file]
*/
-public static String getMessage(String key)
-{
+public static String getMessage(String key) {
return load().getString(key);
}
@@ -291,8 +287,7 @@ public final class Messages
* @param key the key for the message
* @param args MessageFormat arguments. See {@linkplain MessageFormat#format(Object)}]
*/
-public static String getMessage(String key, Object...args)
-{
+public static String getMessage(String key, Object...args) {
return new MessageFormat(load().getString(key), Locale.ROOT).format(args);
}
}


@@ -12,8 +12,7 @@ import java.util.NoSuchElementException;
* An iterator useful to fetch a big number of documents of type T
* and iterate through them in batches.
*/
-public interface BatchedDocumentsIterator<T>
-{
+public interface BatchedDocumentsIterator<T> {
/**
* Query documents whose timestamp is within the given time range
*


@@ -17,17 +17,14 @@ import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.results.Influencer;
-class ElasticsearchBatchedInfluencersIterator extends ElasticsearchBatchedResultsIterator<Influencer>
-{
+class ElasticsearchBatchedInfluencersIterator extends ElasticsearchBatchedResultsIterator<Influencer> {
public ElasticsearchBatchedInfluencersIterator(Client client, String jobId,
-ParseFieldMatcher parserFieldMatcher)
-{
+ParseFieldMatcher parserFieldMatcher) {
super(client, jobId, Influencer.RESULT_TYPE_VALUE, parserFieldMatcher);
}
@Override
-protected Influencer map(SearchHit hit)
-{
+protected Influencer map(SearchHit hit) {
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {


@@ -16,22 +16,18 @@ import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
import java.io.IOException;
-class ElasticsearchBatchedModelDebugOutputIterator extends ElasticsearchBatchedDocumentsIterator<ModelDebugOutput>
-{
-public ElasticsearchBatchedModelDebugOutputIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
-{
+class ElasticsearchBatchedModelDebugOutputIterator extends ElasticsearchBatchedDocumentsIterator<ModelDebugOutput> {
+public ElasticsearchBatchedModelDebugOutputIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher) {
super(client, JobResultsPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
-protected String getType()
-{
+protected String getType() {
return ModelDebugOutput.TYPE.getPreferredName();
}
@Override
-protected ModelDebugOutput map(SearchHit hit)
-{
+protected ModelDebugOutput map(SearchHit hit) {
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {


@@ -17,22 +17,18 @@ import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.ModelSizeStats;
-public class ElasticsearchBatchedModelSizeStatsIterator extends ElasticsearchBatchedDocumentsIterator<ModelSizeStats>
-{
-public ElasticsearchBatchedModelSizeStatsIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
-{
+public class ElasticsearchBatchedModelSizeStatsIterator extends ElasticsearchBatchedDocumentsIterator<ModelSizeStats> {
+public ElasticsearchBatchedModelSizeStatsIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher) {
super(client, JobResultsPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
-protected String getType()
-{
+protected String getType() {
return ModelSizeStats.TYPE.getPreferredName();
}
@Override
-protected ModelSizeStats map(SearchHit hit)
-{
+protected ModelSizeStats map(SearchHit hit) {
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {


@@ -17,22 +17,18 @@ import java.io.IOException;
import org.elasticsearch.xpack.prelert.job.ModelSnapshot;
-class ElasticsearchBatchedModelSnapshotIterator extends ElasticsearchBatchedDocumentsIterator<ModelSnapshot>
-{
-public ElasticsearchBatchedModelSnapshotIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher)
-{
+class ElasticsearchBatchedModelSnapshotIterator extends ElasticsearchBatchedDocumentsIterator<ModelSnapshot> {
+public ElasticsearchBatchedModelSnapshotIterator(Client client, String jobId, ParseFieldMatcher parserFieldMatcher) {
super(client, JobResultsPersister.getJobIndexName(jobId), parserFieldMatcher);
}
@Override
-protected String getType()
-{
+protected String getType() {
return ModelSnapshot.TYPE.getPreferredName();
}
@Override
-protected ModelSnapshot map(SearchHit hit)
-{
+protected ModelSnapshot map(SearchHit hit) {
BytesReference source = hit.getSourceRef();
XContentParser parser;
try {


@@ -16,15 +16,13 @@ import org.elasticsearch.xpack.prelert.job.results.ReservedFieldNames;
* Interprets field names containing dots as nested JSON structures.
* This matches what Elasticsearch does.
*/
-class ElasticsearchDotNotationReverser
-{
+class ElasticsearchDotNotationReverser {
private static final char DOT = '.';
private static final Pattern DOT_PATTERN = Pattern.compile("\\.");
private final Map<String, Object> resultsMap;
-public ElasticsearchDotNotationReverser()
-{
+public ElasticsearchDotNotationReverser() {
resultsMap = new TreeMap<>();
}
@@ -33,6 +31,7 @@ class ElasticsearchDotNotationReverser
// results are only strings, so it's not "minimum viable product" right
// now. Hence this method only takes fieldValue as a String and there are
// no overloads.
/**
* Given a field name and value, convert it to a map representation of the
* (potentially nested) JSON structure Elasticsearch would use to store it.
@@ -41,18 +40,14 @@ class ElasticsearchDotNotationReverser
* <code>foo.bar = y</code> goes to <code>{ "foo" : { "bar" : "y" } }</code>
*/
@SuppressWarnings("unchecked")
-public void add(String fieldName, String fieldValue)
-{
-if (fieldName == null || fieldValue == null)
-{
+public void add(String fieldName, String fieldValue) {
+if (fieldName == null || fieldValue == null) {
return;
}
// Minimise processing in the simple case of no dots in the field name.
-if (fieldName.indexOf(DOT) == -1)
-{
-if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(fieldName))
-{
+if (fieldName.indexOf(DOT) == -1) {
+if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(fieldName)) {
return;
}
resultsMap.put(fieldName, fieldValue);
@@ -63,39 +58,27 @@ class ElasticsearchDotNotationReverser
// If any segment created by the split is a reserved word then ignore
// the whole field.
-for (String segment : segments)
-{
-if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(segment))
-{
+for (String segment : segments) {
+if (ReservedFieldNames.RESERVED_FIELD_NAMES.contains(segment)) {
return;
}
}
Map<String, Object> layerMap = resultsMap;
-for (int i = 0; i < segments.length; ++i)
-{
+for (int i = 0; i < segments.length; ++i) {
String segment = segments[i];
-if (i == segments.length - 1)
-{
+if (i == segments.length - 1) {
layerMap.put(segment, fieldValue);
-}
-else
-{
+} else {
Object existingLayerValue = layerMap.get(segment);
-if (existingLayerValue == null)
-{
+if (existingLayerValue == null) {
Map<String, Object> nextLayerMap = new TreeMap<>();
layerMap.put(segment, nextLayerMap);
layerMap = nextLayerMap;
-}
-else
-{
-if (existingLayerValue instanceof Map)
-{
-layerMap = (Map<String, Object>)existingLayerValue;
-}
-else
-{
+} else {
+if (existingLayerValue instanceof Map) {
+layerMap = (Map<String, Object>) existingLayerValue;
+} else {
// This implies an inconsistency - different additions
// imply the same path leads to both an object and a
// value. For example:
@@ -108,8 +91,7 @@ class ElasticsearchDotNotationReverser
}
}
-public Map<String, Object> getResultsMap()
-{
+public Map<String, Object> getResultsMap() {
return resultsMap;
}
@@ -117,35 +99,27 @@ class ElasticsearchDotNotationReverser
* Mappings for a given hierarchical structure are more complex than the
* basic results.
*/
-public Map<String, Object> getMappingsMap()
-{
+public Map<String, Object> getMappingsMap() {
Map<String, Object> mappingsMap = new TreeMap<>();
recurseMappingsLevel(resultsMap, mappingsMap);
return mappingsMap;
}
@SuppressWarnings("unchecked")
-private void recurseMappingsLevel(Map<String, Object> resultsMap,
-Map<String, Object> mappingsMap)
-{
-for (Map.Entry<String, Object> entry : resultsMap.entrySet())
-{
+private void recurseMappingsLevel(Map<String, Object> resultsMap, Map<String, Object> mappingsMap) {
+for (Map.Entry<String, Object> entry : resultsMap.entrySet()) {
Map<String, Object> typeMap = new TreeMap<>();
String name = entry.getKey();
Object value = entry.getValue();
-if (value instanceof Map)
-{
+if (value instanceof Map) {
Map<String, Object> propertiesMap = new TreeMap<>();
-recurseMappingsLevel((Map<String, Object>)value, propertiesMap);
+recurseMappingsLevel((Map<String, Object>) value, propertiesMap);
-typeMap.put(ElasticsearchMappings.TYPE,
-ElasticsearchMappings.OBJECT);
+typeMap.put(ElasticsearchMappings.TYPE, ElasticsearchMappings.OBJECT);
typeMap.put(ElasticsearchMappings.PROPERTIES, propertiesMap);
mappingsMap.put(name, typeMap);
-}
-else
-{
+} else {
String fieldType = value.getClass().getSimpleName().toLowerCase(Locale.ROOT);
if ("string".equals(fieldType)) {
fieldType = "keyword";


@@ -443,8 +443,7 @@ public class ElasticsearchJobProvider implements JobProvider {
private List<PerPartitionMaxProbabilities> partitionMaxNormalisedProbabilities(String jobId, Object epochStart, Object epochEnd,
String partitionFieldValue)
-throws ResourceNotFoundException
-{
+throws ResourceNotFoundException {
QueryBuilder timeRangeQuery = new ResultsFilterBuilder()
.timeRange(ElasticsearchMappings.ES_TIMESTAMP, epochStart, epochEnd)
.build();


@@ -37,21 +37,18 @@ public final class ElasticsearchScripts {
public static final int UPDATE_JOB_RETRY_COUNT = 3;
-private ElasticsearchScripts()
-{
+private ElasticsearchScripts() {
// Do nothing
}
-public static Script newUpdateBucketCount(long count)
-{
+public static Script newUpdateBucketCount(long count) {
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(COUNT_PARAM, count);
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_BUCKET_COUNT, scriptParams);
}
public static Script newUpdateUsage(long additionalBytes, long additionalFields,
-long additionalRecords)
-{
+long additionalRecords) {
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(BYTES_PARAM, additionalBytes);
scriptParams.put(FIELD_COUNT_PARAM, additionalFields);
@@ -59,8 +56,7 @@ public final class ElasticsearchScripts {
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_USAGE, scriptParams);
}
-public static Script updateProcessingTime(Long processingTimeMs)
-{
+public static Script updateProcessingTime(Long processingTimeMs) {
Map<String, Object> scriptParams = new HashMap<>();
scriptParams.put(PROCESSING_TIME_PARAM, processingTimeMs);
return new Script(ScriptType.INLINE, PAINLESS, UPDATE_AVERAGE_PROCESSING_TIME, scriptParams);
@@ -111,7 +107,7 @@ public final class ElasticsearchScripts {
* @return {@code} true if successful, {@code} false otherwise
*/
public static boolean upsertViaScript(Client client, String index, String type, String docId, Script script,
-Map<String, Object> upsertMap) {
+Map<String, Object> upsertMap) {
try {
client.prepareUpdate(index, type, docId)
.setScript(script)


@@ -5,7 +5,6 @@
*/
package org.elasticsearch.xpack.prelert.job.persistence;
-public interface JobDataDeleterFactory
-{
+public interface JobDataDeleterFactory {
JobDataDeleter newDeleter(String jobId);
}


@@ -10,8 +10,7 @@ package org.elasticsearch.xpack.prelert.job.persistence;
* This may create a new JobProvider or return an existing
* one if it is thread safe and shareable.
*/
-public interface JobProviderFactory
-{
+public interface JobProviderFactory {
/**
* Get a {@linkplain JobProvider}
*/


@@ -17,8 +17,7 @@ import org.elasticsearch.xpack.prelert.job.results.ModelDebugOutput;
import java.util.Optional;
-public interface JobResultsProvider
-{
+public interface JobResultsProvider {
/**
* Search for buckets with the parameters in the {@link BucketsQueryBuilder}
* @return QueryPage of Buckets


@@ -74,8 +74,8 @@ class CsvDataToProcessWriter extends AbstractDataToProcessWriter {
try (CsvListReader csvReader = new CsvListReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8), csvPref)) {
String[] header = csvReader.getHeader(true);
-if (header == null) // null if EoF
-{
+if (header == null) { // null if EoF
return statusReporter.incrementalStats();
}


@@ -21,8 +21,7 @@ import java.util.Objects;
* Anomaly Cause POJO.
* Used as a nested level inside population anomaly records.
*/
-public class AnomalyCause extends ToXContentToBytes implements Writeable
-{
+public class AnomalyCause extends ToXContentToBytes implements Writeable {
public static final ParseField ANOMALY_CAUSE = new ParseField("anomaly_cause");
/**
* Result fields
@@ -188,150 +187,121 @@ public class AnomalyCause extends ToXContentToBytes implements Writeable
}
-public double getProbability()
-{
+public double getProbability() {
return probability;
}
-public void setProbability(double value)
-{
+public void setProbability(double value) {
probability = value;
}
-public String getByFieldName()
-{
+public String getByFieldName() {
return byFieldName;
}
-public void setByFieldName(String value)
-{
+public void setByFieldName(String value) {
byFieldName = value.intern();
}
-public String getByFieldValue()
-{
+public String getByFieldValue() {
return byFieldValue;
}
-public void setByFieldValue(String value)
-{
+public void setByFieldValue(String value) {
byFieldValue = value.intern();
}
-public String getCorrelatedByFieldValue()
-{
+public String getCorrelatedByFieldValue() {
return correlatedByFieldValue;
}
-public void setCorrelatedByFieldValue(String value)
-{
+public void setCorrelatedByFieldValue(String value) {
correlatedByFieldValue = value.intern();
}
-public String getPartitionFieldName()
-{
+public String getPartitionFieldName() {
return partitionFieldName;
}
-public void setPartitionFieldName(String field)
-{
+public void setPartitionFieldName(String field) {
partitionFieldName = field.intern();
}
-public String getPartitionFieldValue()
-{
+public String getPartitionFieldValue() {
return partitionFieldValue;
}
-public void setPartitionFieldValue(String value)
-{
+public void setPartitionFieldValue(String value) {
partitionFieldValue = value.intern();
}
-public String getFunction()
-{
+public String getFunction() {
return function;
}
-public void setFunction(String name)
-{
+public void setFunction(String name) {
function = name.intern();
}
-public String getFunctionDescription()
-{
+public String getFunctionDescription() {
return functionDescription;
}
-public void setFunctionDescription(String functionDescription)
-{
+public void setFunctionDescription(String functionDescription) {
this.functionDescription = functionDescription.intern();
}
-public List<Double> getTypical()
-{
+public List<Double> getTypical() {
return typical;
}
-public void setTypical(List<Double> typical)
-{
+public void setTypical(List<Double> typical) {
this.typical = typical;
}
-public List<Double> getActual()
-{
+public List<Double> getActual() {
return actual;
}
-public void setActual(List<Double> actual)
-{
+public void setActual(List<Double> actual) {
this.actual = actual;
}
-public String getFieldName()
-{
+public String getFieldName() {
return fieldName;
}
-public void setFieldName(String field)
-{
+public void setFieldName(String field) {
fieldName = field.intern();
}
-public String getOverFieldName()
-{
+public String getOverFieldName() {
return overFieldName;
}
-public void setOverFieldName(String name)
-{
+public void setOverFieldName(String name) {
overFieldName = name.intern();
}
-public String getOverFieldValue()
-{
+public String getOverFieldValue() {
return overFieldValue;
}
-public void setOverFieldValue(String value)
-{
+public void setOverFieldValue(String value) {
overFieldValue = value.intern();
}
-public List<Influence> getInfluencers()
-{
+public List<Influence> getInfluencers() {
return influencers;
}
-public void setInfluencers(List<Influence> influencers)
-{
+public void setInfluencers(List<Influence> influencers) {
this.influencers = influencers;
}
@Override
-public int hashCode()
-{
+public int hashCode() {
return Objects.hash(probability,
actual,
typical,
@@ -349,15 +319,12 @@ public class AnomalyCause extends ToXContentToBytes implements Writeable
}
@Override
-public boolean equals(Object other)
-{
-if (this == other)
-{
+public boolean equals(Object other) {
+if (this == other) {
return true;
}
-if (other instanceof AnomalyCause == false)
-{
+if (other instanceof AnomalyCause == false) {
return false;
}


@@ -27,8 +27,7 @@ import java.util.Objects;
* reserved words that are likely to clash with fields in the input data (due to
* the restrictions on Elasticsearch mappings).
*/
-public class ModelDebugOutput extends ToXContentToBytes implements Writeable
-{
+public class ModelDebugOutput extends ToXContentToBytes implements Writeable {
public static final ParseField TYPE = new ParseField("model_debug_output");
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ParseField PARTITION_FIELD_NAME = new ParseField("partition_field_name");
@@ -169,145 +168,116 @@ public class ModelDebugOutput extends ToXContentToBytes implements Writeable
return jobId;
}
-public String getId()
-{
+public String getId() {
return id;
}
-public void setId(String id)
-{
+public void setId(String id) {
this.id = id;
}
-public Date getTimestamp()
-{
+public Date getTimestamp() {
return timestamp;
}
-public void setTimestamp(Date timestamp)
-{
+public void setTimestamp(Date timestamp) {
this.timestamp = timestamp;
}
-public String getPartitionFieldName()
-{
+public String getPartitionFieldName() {
return partitionFieldName;
}
-public void setPartitionFieldName(String partitionFieldName)
-{
+public void setPartitionFieldName(String partitionFieldName) {
this.partitionFieldName = partitionFieldName;
}
-public String getPartitionFieldValue()
-{
+public String getPartitionFieldValue() {
return partitionFieldValue;
}
-public void setPartitionFieldValue(String partitionFieldValue)
-{
+public void setPartitionFieldValue(String partitionFieldValue) {
this.partitionFieldValue = partitionFieldValue;
}
-public String getOverFieldName()
-{
+public String getOverFieldName() {
return overFieldName;
}
-public void setOverFieldName(String overFieldName)
-{
+public void setOverFieldName(String overFieldName) {
this.overFieldName = overFieldName;
}
-public String getOverFieldValue()
-{
+public String getOverFieldValue() {
return overFieldValue;
}
-public void setOverFieldValue(String overFieldValue)
-{
+public void setOverFieldValue(String overFieldValue) {
this.overFieldValue = overFieldValue;
}
-public String getByFieldName()
-{
+public String getByFieldName() {
return byFieldName;
}
-public void setByFieldName(String byFieldName)
-{
+public void setByFieldName(String byFieldName) {
this.byFieldName = byFieldName;
}
-public String getByFieldValue()
-{
+public String getByFieldValue() {
return byFieldValue;
}
-public void setByFieldValue(String byFieldValue)
-{
+public void setByFieldValue(String byFieldValue) {
this.byFieldValue = byFieldValue;
}
-public String getDebugFeature()
-{
+public String getDebugFeature() {
return debugFeature;
}
-public void setDebugFeature(String debugFeature)
-{
+public void setDebugFeature(String debugFeature) {
this.debugFeature = debugFeature;
}
-public double getDebugLower()
-{
+public double getDebugLower() {
return debugLower;
}
-public void setDebugLower(double debugLower)
-{
+public void setDebugLower(double debugLower) {
this.debugLower = debugLower;
}
-public double getDebugUpper()
-{
+public double getDebugUpper() {
return debugUpper;
}
-public void setDebugUpper(double debugUpper)
-{
+public void setDebugUpper(double debugUpper) {
this.debugUpper = debugUpper;
}
-public double getDebugMedian()
-{
+public double getDebugMedian() {
return debugMedian;
}
-public void setDebugMedian(double debugMedian)
-{
+public void setDebugMedian(double debugMedian) {
this.debugMedian = debugMedian;
}
-public double getActual()
-{
+public double getActual() {
return actual;
}
-public void setActual(double actual)
-{
+public void setActual(double actual) {
this.actual = actual;
}
@Override
-public boolean equals(Object other)
-{
-if (this == other)
-{
+public boolean equals(Object other) {
+if (this == other) {
return true;
}
-if (other instanceof ModelDebugOutput == false)
-{
+if (other instanceof ModelDebugOutput == false) {
return false;
}
// id excluded here as it is generated by the datastore
@@ -328,8 +298,7 @@ public class ModelDebugOutput extends ToXContentToBytes implements Writeable
}
@Override
-public int hashCode()
-{
+public int hashCode() {
// id excluded here as it is generated by the datastore
return Objects.hash(jobId, timestamp, partitionFieldName, partitionFieldValue,
overFieldName, overFieldValue, byFieldName, byFieldValue,


@@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
@FunctionalInterface
-public interface ArgumentVerifier
-{
+public interface ArgumentVerifier {
void verify(String argument, TransformConfig tc) throws ElasticsearchParseException;
}


@@ -12,8 +12,7 @@ import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
import java.util.List;
import java.util.regex.Pattern;
-public class RegexExtractVerifier implements ArgumentVerifier
-{
+public class RegexExtractVerifier implements ArgumentVerifier {
@Override
public void verify(String arg, TransformConfig tc) {
new RegexPatternVerifier().verify(arg, tc);


@@ -12,8 +12,7 @@ import org.elasticsearch.xpack.prelert.job.transform.TransformConfig;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
-public class RegexPatternVerifier implements ArgumentVerifier
-{
+public class RegexPatternVerifier implements ArgumentVerifier {
@Override
public void verify(String arg, TransformConfig tc) throws ElasticsearchParseException {
try {


@@ -13,10 +13,8 @@ import org.elasticsearch.xpack.prelert.job.transform.TransformType;
import java.util.List;
-public final class TransformConfigVerifier
-{
-private TransformConfigVerifier()
-{
+public final class TransformConfigVerifier {
+private TransformConfigVerifier() {
// Hide default constructor
}


@@ -14,10 +14,8 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
-public class TransformConfigsVerifier
-{
-private TransformConfigsVerifier()
-{
+public class TransformConfigsVerifier {
+private TransformConfigsVerifier() {
}
/**
@@ -28,24 +26,20 @@ public class TransformConfigsVerifier
* <li>Check there are no circular dependencies in the transforms</li>
* </ol>
*/
-public static boolean verify(List<TransformConfig> transforms) throws ElasticsearchParseException
-{
-for (TransformConfig tr : transforms)
-{
+public static boolean verify(List<TransformConfig> transforms) throws ElasticsearchParseException {
+for (TransformConfig tr : transforms) {
TransformConfigVerifier.verify(tr);
}
String duplicatedName = outputNamesAreUnique(transforms);
-if (duplicatedName != null)
-{
+if (duplicatedName != null) {
String msg = Messages.getMessage(Messages.JOB_CONFIG_TRANSFORM_OUTPUT_NAME_USED_MORE_THAN_ONCE, duplicatedName);
throw new IllegalArgumentException(msg);
}
// Check for circular dependencies
int index = checkForCircularDependencies(transforms);
-if (index >= 0)
-{
+if (index >= 0) {
TransformConfig tc = transforms.get(index);
String msg = Messages.getMessage(Messages.JOB_CONFIG_TRANSFORM_CIRCULAR_DEPENDENCY, tc.type(), tc.getInputs());
throw new IllegalArgumentException(msg);
@@ -60,15 +54,11 @@ public class TransformConfigsVerifier
* unique or the first duplicate name if there are
* duplications
*/
-private static String outputNamesAreUnique(List<TransformConfig> transforms)
-{
+private static String outputNamesAreUnique(List<TransformConfig> transforms) {
Set<String> fields = new HashSet<>();
-for (TransformConfig t : transforms)
-{
-for (String output : t.getOutputs())
-{
-if (fields.contains(output))
-{
+for (TransformConfig t : transforms) {
+for (String output : t.getOutputs()) {
+if (fields.contains(output)) {
return output;
}
fields.add(output);
@@ -78,8 +68,6 @@ public class TransformConfigsVerifier
return null;
}
/**
* Find circular dependencies in the list of transforms.
* This might be because a transform's input is its output
@@ -92,16 +80,13 @@ public class TransformConfigsVerifier
* @return -1 if no circular dependencies else the index of the
* transform at the start of the circular chain
*/
-public static int checkForCircularDependencies(List<TransformConfig> transforms)
-{
-for (int i=0; i<transforms.size(); i++)
-{
+public static int checkForCircularDependencies(List<TransformConfig> transforms) {
+for (int i=0; i<transforms.size(); i++) {
Set<Integer> chain = new HashSet<Integer>();
chain.add(new Integer(i));
TransformConfig tc = transforms.get(i);
-if (checkCircularDependenciesRecursive(tc, transforms, chain) == false)
-{
+if (checkCircularDependenciesRecursive(tc, transforms, chain) == false) {
return i;
}
}
@@ -110,23 +95,17 @@ public class TransformConfigsVerifier
}
-private static boolean checkCircularDependenciesRecursive(TransformConfig transform,
-List<TransformConfig> transforms,
-Set<Integer> chain)
-{
+private static boolean checkCircularDependenciesRecursive(TransformConfig transform, List<TransformConfig> transforms,
+Set<Integer> chain) {
boolean result = true;
-for (int i=0; i<transforms.size(); i++)
-{
+for (int i=0; i<transforms.size(); i++) {
TransformConfig tc = transforms.get(i);
-for (String input : transform.getInputs())
-{
-if (tc.getOutputs().contains(input))
-{
+for (String input : transform.getInputs()) {
+if (tc.getOutputs().contains(input)) {
Integer index = new Integer(i);
-if (chain.contains(index))
-{
+if (chain.contains(index)) {
return false;
}


@@ -43,8 +43,7 @@ public class TransformFactory {
index = scratchAreaIndexesMap.get(field);
if (index != null) {
readIndexes.add(new TransformIndex(SCRATCH_ARRAY_INDEX, index));
-} else if (outputIndexesMap.containsKey(field)) // also check the outputs array for this input
-{
+} else if (outputIndexesMap.containsKey(field)) { // also check the outputs array for this input
index = outputIndexesMap.get(field);
readIndexes.add(new TransformIndex(SCRATCH_ARRAY_INDEX, index));
} else {


@@ -11,13 +11,10 @@ import java.util.regex.Pattern;
* Another String utilities class. Class name is prefixed with Prelert to avoid confusion
* with one of the myriad String utility classes out there.
*/
-public final class PrelertStrings
-{
+public final class PrelertStrings {
private static final Pattern NEEDS_QUOTING = Pattern.compile("\\W");
-private PrelertStrings()
-{
-// do nothing
+private PrelertStrings() {
}
/**
@@ -29,21 +26,17 @@ public final class PrelertStrings
* @return {@code input} when it does not contain non-word characters, or a new string
* that contains {@code input} surrounded by double quotes otherwise
*/
-public static String doubleQuoteIfNotAlphaNumeric(String input)
-{
-if (!NEEDS_QUOTING.matcher(input).find())
-{
+public static String doubleQuoteIfNotAlphaNumeric(String input) {
+if (!NEEDS_QUOTING.matcher(input).find()) {
return input;
}
StringBuilder quoted = new StringBuilder();
quoted.append('\"');
-for (int i = 0; i < input.length(); ++i)
-{
+for (int i = 0; i < input.length(); ++i) {
char c = input.charAt(i);
if (c == '\"' || c == '\\')
{
if (c == '\"' || c == '\\') {
quoted.append('\\');
}
quoted.append(c);


@@ -25,15 +25,12 @@ import java.time.temporal.TemporalAccessor;
* <p> Objects of this class are <b>immutable</b> and <b>thread-safe</b>
*
*/
-public class DateTimeFormatterTimestampConverter implements TimestampConverter
-{
+public class DateTimeFormatterTimestampConverter implements TimestampConverter {
private final DateTimeFormatter formatter;
private final boolean hasTimeZone;
private final ZoneId defaultZoneId;
-private DateTimeFormatterTimestampConverter(DateTimeFormatter dateTimeFormatter,
-boolean hasTimeZone, ZoneId defaultTimezone)
-{
+private DateTimeFormatterTimestampConverter(DateTimeFormatter dateTimeFormatter, boolean hasTimeZone, ZoneId defaultTimezone) {
formatter = dateTimeFormatter;
this.hasTimeZone = hasTimeZone;
defaultZoneId = defaultTimezone;
@@ -48,8 +45,7 @@ public class DateTimeFormatterTimestampConverter implements TimestampConverter
* @throws IllegalArgumentException if the pattern is invalid or cannot produce a full timestamp
* (e.g. contains a date but not a time)
*/
-public static TimestampConverter ofPattern(String pattern)
-{
+public static TimestampConverter ofPattern(String pattern) {
return ofPattern(pattern, ZoneOffset.systemDefault());
}
@@ -62,8 +58,7 @@ public class DateTimeFormatterTimestampConverter implements TimestampConverter
* @throws IllegalArgumentException if the pattern is invalid or cannot produce a full timestamp
* (e.g. contains a date but not a time)
*/
-public static TimestampConverter ofPattern(String pattern, ZoneId defaultTimezone)
-{
+public static TimestampConverter ofPattern(String pattern, ZoneId defaultTimezone) {
DateTimeFormatter formatter = new DateTimeFormatterBuilder()
.parseLenient()
.appendPattern(pattern)
@@ -71,43 +66,35 @@ public class DateTimeFormatterTimestampConverter implements TimestampConverter
.toFormatter();
String now = formatter.format(ZonedDateTime.ofInstant(Instant.ofEpochSecond(0), ZoneOffset.UTC));
-try
-{
+try {
TemporalAccessor parsed = formatter.parse(now);
boolean hasTimeZone = parsed.isSupported(ChronoField.INSTANT_SECONDS);
-if (hasTimeZone)
-{
+if (hasTimeZone) {
Instant.from(parsed);
}
-else
-{
+else {
LocalDateTime.from(parsed);
}
return new DateTimeFormatterTimestampConverter(formatter, hasTimeZone, defaultTimezone);
}
-catch (DateTimeException e)
-{
+catch (DateTimeException e) {
throw new IllegalArgumentException("Timestamp cannot be derived from pattern: " + pattern);
}
}
@Override
-public long toEpochSeconds(String timestamp)
-{
+public long toEpochSeconds(String timestamp) {
return toInstant(timestamp).getEpochSecond();
}
@Override
-public long toEpochMillis(String timestamp)
-{
+public long toEpochMillis(String timestamp) {
return toInstant(timestamp).toEpochMilli();
}
-private Instant toInstant(String timestamp)
-{
+private Instant toInstant(String timestamp) {
TemporalAccessor parsed = formatter.parse(timestamp);
-if (hasTimeZone)
-{
+if (hasTimeZone) {
return Instant.from(parsed);
}
return LocalDateTime.from(parsed).atZone(defaultZoneId).toInstant();


@@ -27,8 +27,7 @@ public final class TimeUtils {
public static long dateStringToEpoch(String date) {
try {
long epoch = Long.parseLong(date);
-if (date.trim().length() <= 10) // seconds
-{
+if (date.trim().length() <= 10) { // seconds
return epoch * 1000;
} else {
return epoch;


@@ -11,8 +11,7 @@ import java.time.format.DateTimeParseException;
* A converter that enables conversions of textual timestamps to epoch seconds
* or milliseconds according to a given pattern.
*/
-public interface TimestampConverter
-{
+public interface TimestampConverter {
/**
* Converts the a textual timestamp into an epoch in seconds
*


@@ -21,8 +21,7 @@ public class ProblemTrackerTests extends ESTestCase {
private ProblemTracker problemTracker;
@Before
-public void setUpTests()
-{
+public void setUpTests() {
auditor = mock(Auditor.class);
problemTracker = new ProblemTracker(() -> auditor);
}


@@ -7,8 +7,7 @@ package org.elasticsearch.xpack.prelert.utils.time;
import org.elasticsearch.test.ESTestCase;
-public class TimeUtilsTests extends ESTestCase
-{
+public class TimeUtilsTests extends ESTestCase {
public void testdateStringToEpoch() {
assertEquals(1462096800000L, TimeUtils.dateStringToEpoch("2016-05-01T10:00:00Z"));
assertEquals(1462096800333L, TimeUtils.dateStringToEpoch("2016-05-01T10:00:00.333Z"));