Replace ThrottlerField -> Field in comments and string constants (elastic/x-pack-elasticsearch#4238)

Original commit: elastic/x-pack-elasticsearch@97b449d29d
This commit is contained in:
David Kyle 2018-03-30 21:52:28 +01:00 committed by GitHub
parent 574ce84885
commit a4fca07c01
21 changed files with 27 additions and 27 deletions

View File

@@ -96,7 +96,7 @@ public class XPackLicenseState {
return new String[] {
"The following X-Pack security functionality will be disabled: authentication, authorization, " +
"ip filtering, and auditing. Please restart your node after applying the license.",
"ThrottlerField and document level access control will be disabled.",
"Field and document level access control will be disabled.",
"Custom realms will be ignored."
};
}
@@ -109,7 +109,7 @@ public class XPackLicenseState {
case TRIAL:
case PLATINUM:
return new String[] {
"ThrottlerField and document level access control will be disabled.",
"Field and document level access control will be disabled.",
"Custom realms will be ignored."
};
}
@@ -124,7 +124,7 @@ public class XPackLicenseState {
return new String[] {
"Authentication will be limited to the native realms.",
"IP filtering and auditing will be disabled.",
"ThrottlerField and document level access control will be disabled.",
"Field and document level access control will be disabled.",
"Custom realms will be ignored."
};
}
@@ -322,7 +322,7 @@ public class XPackLicenseState {
}
/**
* Determine if Document Level Security (DLS) and ThrottlerField Level Security (FLS) should be enabled.
* Determine if Document Level Security (DLS) and Field Level Security (FLS) should be enabled.
* <p>
* DLS and FLS are only disabled when the mode is not:
* <ul>

View File

@@ -364,7 +364,7 @@ public class Detector implements ToXContentObject, Writeable {
* Segments the analysis along another field to have completely
* independent baselines for each instance of partitionfield
*
* @return The Partition ThrottlerField
* @return The Partition Field
*/
public String getPartitionFieldName() {
return partitionFieldName;

View File

@@ -58,7 +58,7 @@ public class Job extends AbstractDiffable<Job> implements Writeable, ToXContentO
public static final String ANOMALY_DETECTOR_JOB_TYPE = "anomaly_detector";
/*
* ThrottlerField names used in serialization
* Field names used in serialization
*/
public static final ParseField ID = new ParseField("job_id");
public static final ParseField JOB_TYPE = new ParseField("job_type");

View File

@@ -134,10 +134,10 @@ public final class Messages {
public static final String JOB_CONFIG_INVALID_CREATE_SETTINGS =
"The job is configured with fields [{0}] that are illegal to set at job creation";
public static final String JOB_CONFIG_INVALID_FIELDNAME_CHARS =
"Invalid field name ''{0}''. ThrottlerField names including over, by and partition " +
"Invalid field name ''{0}''. Field names including over, by and partition " +
"fields cannot contain any of these characters: {1}";
public static final String JOB_CONFIG_INVALID_FIELDNAME =
"Invalid field name ''{0}''. ThrottlerField names including over, by and partition fields cannot be ''{1}''";
"Invalid field name ''{0}''. Field names including over, by and partition fields cannot be ''{1}''";
public static final String JOB_CONFIG_INVALID_TIMEFORMAT = "Invalid Time format string ''{0}''";
public static final String JOB_CONFIG_MISSING_ANALYSISCONFIG = "An analysis_config must be set";
public static final String JOB_CONFIG_MISSING_DATA_DESCRIPTION = "A data_description must be set";

View File

@@ -26,7 +26,7 @@ import java.util.Objects;
*/
public class FlushAcknowledgement implements ToXContentObject, Writeable {
/**
* ThrottlerField Names
* Field Names
*/
public static final ParseField TYPE = new ParseField("flush");
public static final ParseField ID = new ParseField("id");

View File

@@ -36,7 +36,7 @@ public class ModelSizeStats implements ToXContentObject, Writeable {
public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);
/**
* ThrottlerField Names
* Field Names
*/
public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes");
public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count");

View File

@@ -37,7 +37,7 @@ import java.util.Objects;
*/
public class ModelSnapshot implements ToXContentObject, Writeable {
/**
* ThrottlerField Names
* Field Names
*/
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ParseField DESCRIPTION = new ParseField("description");

View File

@@ -25,7 +25,7 @@ import java.util.Objects;
public class Quantiles implements ToXContentObject, Writeable {
/**
* ThrottlerField Names
* Field Names
*/
public static final ParseField TIMESTAMP = new ParseField("timestamp");
public static final ParseField QUANTILE_STATE = new ParseField("quantile_state");

View File

@@ -32,7 +32,7 @@ import java.util.Optional;
*/
public class Bucket implements ToXContentObject, Writeable {
/*
* ThrottlerField Names
* Field Names
*/
private static final ParseField JOB_ID = Job.ID;

View File

@@ -31,7 +31,7 @@ public class BucketInfluencer implements ToXContentObject, Writeable {
public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);
/**
* ThrottlerField names
* Field names
*/
public static final ParseField INFLUENCER_FIELD_NAME = new ParseField("influencer_field_name");
public static final ParseField INITIAL_ANOMALY_SCORE = new ParseField("initial_anomaly_score");

View File

@@ -31,7 +31,7 @@ public class Influencer implements ToXContentObject, Writeable {
public static final ParseField RESULT_TYPE_FIELD = new ParseField(RESULT_TYPE_VALUE);
/*
* ThrottlerField names
* Field names
*/
public static final ParseField PROBABILITY = new ParseField("probability");
public static final ParseField BUCKET_SPAN = new ParseField("bucket_span");

View File

@@ -47,7 +47,7 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArg
* <table cellspacing="8">
* <caption>Fields in cron expressions</caption>
* <tr>
* <th align="left">ThrottlerField Name</th>
* <th align="left">Field Name</th>
* <th align="left">&nbsp;</th>
* <th align="left">Allowed Values</th>
* <th align="left">&nbsp;</th>

View File

@@ -320,11 +320,11 @@ public class RoleDescriptor implements ToXContentObject {
}
if (indexPrivileges != null) {
if (Arrays.stream(indexPrivileges).anyMatch(IndicesPrivileges::isUsingFieldLevelSecurity)) {
throw new ElasticsearchParseException("ThrottlerField [{}] is not supported in a has_privileges request",
throw new ElasticsearchParseException("Field [{}] is not supported in a has_privileges request",
RoleDescriptor.Fields.FIELD_PERMISSIONS);
}
if (Arrays.stream(indexPrivileges).anyMatch(IndicesPrivileges::isUsingDocumentLevelSecurity)) {
throw new ElasticsearchParseException("ThrottlerField [{}] is not supported in a has_privileges request", Fields.QUERY);
throw new ElasticsearchParseException("Field [{}] is not supported in a has_privileges request", Fields.QUERY);
}
}
return new RoleDescriptor(description, clusterPrivileges, indexPrivileges, null);

View File

@@ -80,7 +80,7 @@ import static org.apache.lucene.search.BooleanClause.Occur.SHOULD;
* <p>
* Based on the {@link ThreadContext} this class will enable field and/or document level security.
* <p>
* ThrottlerField level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
* Field level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
* in the {@link #wrap(DirectoryReader)} method.
* <p>
* Document level security is enabled by wrapping the original {@link DirectoryReader} in a {@link DocumentSubsetReader}

View File

@@ -33,7 +33,7 @@ import static org.apache.lucene.util.automaton.Operations.subsetOf;
/**
* Stores patterns to fields which access is granted or denied to and maintains an automaton that can be used to check if permission is
* allowed for a specific field.
* ThrottlerField permissions are configured via a list of strings that are patterns a field has to match. Two lists determine whether or
* Field permissions are configured via a list of strings that are patterns a field has to match. Two lists determine whether or
* not a field is granted access to:
* 1. It has to match the patterns in grantedFieldsArray
* 2. it must not match the patterns in deniedFieldsArray

View File

@@ -10,7 +10,7 @@ import java.util.Collections;
import java.util.Set;
/**
* Represents the definition of a {@link FieldPermissions}. ThrottlerField permissions are defined as a
* Represents the definition of a {@link FieldPermissions}. Field permissions are defined as a
* collections of grant and exclude definitions where the exclude definition must be a subset of
* the grant definition.
*/

View File

@@ -121,9 +121,9 @@ class ExtractedFields {
method = ExtractedField.ExtractionMethod.DOC_VALUE;
} else if (isText(field)) {
String parentField = MlStrings.getParentField(field);
// ThrottlerField is text so check if it is a multi-field
// Field is text so check if it is a multi-field
if (Objects.equals(parentField, field) == false && fieldsCapabilities.getField(parentField) != null) {
// ThrottlerField is a multi-field which means it won't be available in source. Let's take the parent instead.
// Field is a multi-field which means it won't be available in source. Let's take the parent instead.
internalField = parentField;
method = isAggregatable(parentField) ? ExtractedField.ExtractionMethod.DOC_VALUE
: ExtractedField.ExtractionMethod.SOURCE;

View File

@@ -89,7 +89,7 @@ public class AutodetectResultsParser extends AbstractComponent {
consumeAndCloseStream(in);
return false;
} else if (token != XContentParser.Token.START_OBJECT) {
logger.error("Expecting Json ThrottlerField name token after the Start Object token");
logger.error("Expecting Json Field name token after the Start Object token");
consumeAndCloseStream(in);
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}

View File

@@ -144,7 +144,7 @@ class CsvDataToProcessWriter extends AbstractDataToProcessWriter {
}
Integer index = inputFieldIndexes.get(field);
if (index == null) {
String msg = String.format(Locale.ROOT, "ThrottlerField configured for analysis '%s' is not in the CSV header '%s'",
String msg = String.format(Locale.ROOT, "Field configured for analysis '%s' is not in the CSV header '%s'",
field, Arrays.toString(header));
LOGGER.error(msg);

View File

@@ -23,7 +23,7 @@ import java.util.Objects;
*/
public class CppLogMessage implements ToXContentObject, Writeable {
/**
* ThrottlerField Names (these are defined by log4cxx; we have no control over them)
* Field Names (these are defined by log4cxx; we have no control over them)
*/
public static final ParseField LOGGER_FIELD = new ParseField("logger");
public static final ParseField TIMESTAMP_FIELD = new ParseField("timestamp");

View File

@@ -169,7 +169,7 @@ public class JiraIssue implements ToXContentObject {
} else if (Field.ERRORS.match(currentFieldName, parser.getDeprecationHandler())) {
Map<String, Object> fieldErrors = parser.mapOrdered();
for (Map.Entry<String, Object> entry : fieldErrors.entrySet()) {
errors.add("ThrottlerField [" + entry.getKey() + "] has error [" + String.valueOf(entry.getValue()) + "]");
errors.add("Field [" + entry.getKey() + "] has error [" + String.valueOf(entry.getValue()) + "]");
}
} else if (Field.ERROR_MESSAGES.match(currentFieldName, parser.getDeprecationHandler())) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {