Merge branch 'master' into ingest_plugin_api

Ryan Ernst 2016-07-01 12:35:26 -07:00
commit e5caadc4f3
622 changed files with 9204 additions and 4006 deletions

@@ -131,8 +131,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
private void response(Snippet response) {
current.println(" - response_body: |")
response.contents.eachLine { current.println(" $it") }
current.println(" - match: ")
current.println(" \$body: ")
response.contents.eachLine { current.println(" $it") }
}
void emitDo(String method, String pathAndQuery,

@@ -39,6 +39,25 @@
<module name="EqualsHashCode" />
<!-- Checks that the order of modifiers conforms to the suggestions in the
Java Language specification, sections 8.1.1, 8.3.1 and 8.4.3. It is not that
the standard is perfect, but having a consistent order makes the code more
readable and no other order is compellingly better than the standard.
The correct order is:
public
protected
private
abstract
static
final
transient
volatile
synchronized
native
strictfp
-->
<module name="ModifierOrder" />
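For reference, a minimal hypothetical sketch (not part of this commit) of declarations that satisfy the order this check enforces:

    // Hypothetical example: modifiers in the JLS-suggested order.
    public class ModifierOrderExample {
        public static final int MAX_RETRIES = 3;      // public -> static -> final
        private transient volatile long lastSeen;     // private -> transient -> volatile

        public abstract static class Base {           // abstract before static
            public abstract void run();               // visibility before abstract
        }

        public static synchronized void touch() {}    // static before synchronized
    }

Several fixes in this commit (for example DummyAbstractTests below) reorder `static abstract` to `abstract static` to match.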
<!-- We don't use Java's builtin serialization and we suppress all warnings
about it. The flip side of that coin is that we shouldn't _try_ to use
it. We can't outright ban it with ForbiddenApis because it complains about

@@ -1263,26 +1263,8 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanOrEqualToParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParseContext.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanEqualToAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LengthAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanOrEqualToAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]MatchAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]spec[/\\]RestApiParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]support[/\\]FileUtils.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]FileUtilsTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]JsonPathTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]RestTestParserTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]test[/\\]InternalTestClusterTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliTool.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]RestGetSettingsAction.java" checks="LineLength" />

@@ -36,7 +36,7 @@ public class NamingConventionsCheckBadClasses {
public void testDummy() {}
}
public static abstract class DummyAbstractTests extends UnitTestCase {
public abstract static class DummyAbstractTests extends UnitTestCase {
}
public interface DummyInterfaceTests {

@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
dependencies {
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"

@@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
final static String METHOD_NAME = HttpDelete.METHOD_NAME;
static final String METHOD_NAME = HttpDelete.METHOD_NAME;
HttpDeleteWithEntity(final URI uri) {
setURI(uri);

@@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
final static String METHOD_NAME = HttpGet.METHOD_NAME;
static final String METHOD_NAME = HttpGet.METHOD_NAME;
HttpGetWithEntity(final URI uri) {
setURI(uri);

@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
dependencies {
compile "org.elasticsearch.client:rest:${version}"
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"

@@ -26,6 +26,9 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
install.enabled = false
uploadArchives.enabled = false
dependencies {
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "junit:junit:${versions.junit}"

@@ -1110,7 +1110,7 @@ public long ramBytesUsed() {
this.analyzed.copyBytes(analyzed);
}
private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
private static final class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;

@@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
*/
public class StoreRateLimiting {
public static interface Provider {
public interface Provider {
StoreRateLimiting rateLimiting();
}

@@ -43,7 +43,7 @@ public class NodeExplanation implements Writeable, ToXContent {
private final String finalExplanation;
public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
@Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
final ClusterAllocationExplanation.FinalDecision finalDecision,
final String finalExplanation,
final ClusterAllocationExplanation.StoreCopy storeCopy) {

@@ -46,8 +46,6 @@ import static java.util.Collections.unmodifiableMap;
* Node information (static, does not change over time).
*/
public class NodeInfo extends BaseNodeResponse {
@Nullable
private Map<String, String> serviceAttributes;
private Version version;
private Build build;
@@ -85,14 +83,13 @@ public class NodeInfo extends BaseNodeResponse {
public NodeInfo() {
}
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
@Nullable ByteSizeValue totalIndexingBuffer) {
super(node);
this.version = version;
this.build = build;
this.serviceAttributes = serviceAttributes;
this.settings = settings;
this.os = os;
this.process = process;
@@ -127,14 +124,6 @@ public class NodeInfo extends BaseNodeResponse {
return this.build;
}
/**
* The service attributes of the node.
*/
@Nullable
public Map<String, String> getServiceAttributes() {
return this.serviceAttributes;
}
/**
* The settings of the node.
*/
@@ -213,14 +202,6 @@ public class NodeInfo extends BaseNodeResponse {
} else {
totalIndexingBuffer = null;
}
if (in.readBoolean()) {
Map<String, String> builder = new HashMap<>();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.put(in.readString(), in.readString());
}
serviceAttributes = unmodifiableMap(builder);
}
if (in.readBoolean()) {
settings = Settings.readSettingsFromStream(in);
}
@@ -262,16 +243,6 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(true);
out.writeLong(totalIndexingBuffer.bytes());
}
if (getServiceAttributes() == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeVInt(serviceAttributes.size());
for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
}
if (settings == null) {
out.writeBoolean(false);
} else {

@@ -73,12 +73,6 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
builder.byteSizeField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer());
}
if (nodeInfo.getServiceAttributes() != null) {
for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
builder.field(nodeAttribute.getKey(), nodeAttribute.getValue());
}
}
builder.startArray("roles");
for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
builder.value(role.getRoleName());

@@ -364,7 +364,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
throw new ElasticsearchParseException("failed to parse source for create index", e);
}
} else {
settings(new String(source.toBytes(), StandardCharsets.UTF_8));
settings(source.utf8ToString());
}
return this;
}

@@ -130,7 +130,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
private static final ToXContent.Params includeDefaultsParams = new ToXContent.Params() {
final static String INCLUDE_DEFAULTS = "include_defaults";
static final String INCLUDE_DEFAULTS = "include_defaults";
@Override
public String param(String key) {

@@ -30,7 +30,7 @@ import java.io.IOException;
* when the index is at least {@link #value} old
*/
public class MaxAgeCondition extends Condition<TimeValue> {
public final static String NAME = "max_age";
public static final String NAME = "max_age";
public MaxAgeCondition(TimeValue value) {
super(NAME);

@@ -29,7 +29,7 @@ import java.io.IOException;
* when the index has at least {@link #value} docs
*/
public class MaxDocsCondition extends Condition<Long> {
public final static String NAME = "max_docs";
public static final String NAME = "max_docs";
public MaxDocsCondition(Long value) {
super(NAME);

@@ -31,8 +31,8 @@ import java.util.EnumSet;
*/
public class CommonStatsFlags implements Streamable, Cloneable {
public final static CommonStatsFlags ALL = new CommonStatsFlags().all();
public final static CommonStatsFlags NONE = new CommonStatsFlags().clear();
public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
public static final CommonStatsFlags NONE = new CommonStatsFlags().clear();
private EnumSet<Flag> flags = EnumSet.allOf(Flag.class);
private String[] types = null;

@@ -35,7 +35,7 @@ import java.util.Iterator;
*/
public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse> {
public final static long NO_INGEST_TOOK = -1L;
public static final long NO_INGEST_TOOK = -1L;
private BulkItemResponse[] responses;
private long tookInMillis;

@@ -38,7 +38,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
public BulkShardRequest() {
}
BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
BulkShardRequest(ShardId shardId, RefreshPolicy refreshPolicy, BulkItemRequest[] items) {
super(shardId);
this.items = items;
setRefreshPolicy(refreshPolicy);

@@ -344,7 +344,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
final ShardId shardId = entry.getKey();
final List<BulkItemRequest> requests = entry.getValue();
BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.getRefreshPolicy(),
BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, bulkRequest.getRefreshPolicy(),
requests.toArray(new BulkItemRequest[requests.size()]));
bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
bulkShardRequest.timeout(bulkRequest.timeout());

@@ -71,8 +71,8 @@ import static org.elasticsearch.action.support.replication.ReplicationOperation.
*/
public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest, BulkShardResponse> {
private final static String OP_TYPE_UPDATE = "update";
private final static String OP_TYPE_DELETE = "delete";
private static final String OP_TYPE_UPDATE = "update";
private static final String OP_TYPE_DELETE = "delete";
public static final String ACTION_NAME = BulkAction.NAME + "[s]";

@@ -131,7 +131,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
// Advantage is that we're not opening a second searcher to retrieve the _source. Also
// because we are working in the same searcher in engineGetResult we can be sure that a
// doc isn't deleted between the initial get and this call.
GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext(), false);
GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
} else {
return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);

@@ -615,17 +615,17 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
}
private final static class Fields {
final static String MAX_DOC = new String("max_doc");
final static String DOC_COUNT = new String("doc_count");
final static String DENSITY = new String("density");
final static String SUM_DOC_FREQ = new String("sum_doc_freq");
final static String SUM_TOTAL_TERM_FREQ = new String("sum_total_term_freq");
final static String SEARCHABLE = new String("searchable");
final static String AGGREGATABLE = new String("aggregatable");
final static String MIN_VALUE = new String("min_value");
final static String MIN_VALUE_AS_STRING = new String("min_value_as_string");
final static String MAX_VALUE = new String("max_value");
final static String MAX_VALUE_AS_STRING = new String("max_value_as_string");
private static final class Fields {
static final String MAX_DOC = new String("max_doc");
static final String DOC_COUNT = new String("doc_count");
static final String DENSITY = new String("density");
static final String SUM_DOC_FREQ = new String("sum_doc_freq");
static final String SUM_TOTAL_TERM_FREQ = new String("sum_total_term_freq");
static final String SEARCHABLE = new String("searchable");
static final String AGGREGATABLE = new String("aggregatable");
static final String MIN_VALUE = new String("min_value");
static final String MIN_VALUE_AS_STRING = new String("min_value_as_string");
static final String MAX_VALUE = new String("max_value");
static final String MAX_VALUE_AS_STRING = new String("max_value_as_string");
}
}

@@ -39,7 +39,7 @@ import java.util.List;
*/
public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
public final static String DEFAULT_LEVEL = "cluster";
public static final String DEFAULT_LEVEL = "cluster";
private String[] fields = Strings.EMPTY_ARRAY;
private String level = DEFAULT_LEVEL;

@@ -132,7 +132,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
return Integer.MAX_VALUE;
}
final static class BulkRequestModifier implements Iterator<ActionRequest<?>> {
static final class BulkRequestModifier implements Iterator<ActionRequest<?>> {
final BulkRequest bulkRequest;
final Set<Integer> failedSlots;
@@ -210,7 +210,7 @@ public final class IngestActionFilter extends AbstractComponent implements Actio
}
final static class IngestBulkResponseListener implements ActionListener<BulkResponse> {
static final class IngestBulkResponseListener implements ActionListener<BulkResponse> {
private final long ingestTookInMillis;
private final int[] originalSlots;

@@ -168,7 +168,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
*
* @return The profile results or an empty map
*/
public @Nullable Map<String, ProfileShardResult> getProfileResults() {
@Nullable public Map<String, ProfileShardResult> getProfileResults() {
return internalResponse.profile();
}

@@ -134,7 +134,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
});
}
final static class SearchRequestSlot {
static final class SearchRequestSlot {
final SearchRequest request;
final int responseSlot;

@@ -33,7 +33,7 @@ import java.util.List;
*/
public abstract class AbstractListenableActionFuture<T, L> extends AdapterActionFuture<T, L> implements ListenableActionFuture<T> {
private final static ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
private static final ESLogger logger = Loggers.getLogger(AbstractListenableActionFuture.class);
final ThreadPool threadPool;
volatile Object listeners;

@@ -55,7 +55,7 @@ public interface ActionFilter {
* filter chain. This base class should serve any action filter implementations that don't need
* to apply async filtering logic.
*/
public static abstract class Simple extends AbstractComponent implements ActionFilter {
public abstract static class Simple extends AbstractComponent implements ActionFilter {
protected Simple(Settings settings) {
super(settings);

@@ -36,5 +36,5 @@ public interface WriteResponse {
* {@link RefreshPolicy#IMMEDIATE} should always mark this as true. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will only
* set this to true if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
*/
public abstract void setForcedRefresh(boolean forcedRefresh);
void setForcedRefresh(boolean forcedRefresh);
}
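A hedged sketch (assumed helper, not from this commit) of the contract the javadoc describes:

    // Hypothetical: mark the response when the shard was refreshed on the
    // request's behalf, either because the policy was IMMEDIATE or because a
    // WAIT_UNTIL request ran out of refresh listener slots.
    static void reportRefresh(WriteRequest.RefreshPolicy policy, boolean outOfListenerSlots,
                              WriteResponse response) {
        if (policy == WriteRequest.RefreshPolicy.IMMEDIATE
                || (policy == WriteRequest.RefreshPolicy.WAIT_UNTIL && outOfListenerSlots)) {
            response.setForcedRefresh(true); // the client may rely on the change being visible
        }
    }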

@@ -590,7 +590,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
* Can be used for implementations of {@link #shardOperation(BroadcastRequest, ShardRouting) shardOperation} for
* which there is no shard-level return value.
*/
public final static class EmptyResult implements Streamable {
public static final class EmptyResult implements Streamable {
public static EmptyResult INSTANCE = new EmptyResult();
private EmptyResult() {

@@ -50,11 +50,11 @@ public class ReplicationOperation<
ReplicaRequest extends ReplicationRequest<ReplicaRequest>,
PrimaryResultT extends ReplicationOperation.PrimaryResult<ReplicaRequest>
> {
final private ESLogger logger;
final private Request request;
final private Supplier<ClusterState> clusterStateSupplier;
final private String opType;
final private AtomicInteger totalShards = new AtomicInteger();
private final ESLogger logger;
private final Request request;
private final Supplier<ClusterState> clusterStateSupplier;
private final String opType;
private final AtomicInteger totalShards = new AtomicInteger();
/**
* The number of pending sub-operations in this operation. This is incremented when the following operations start and decremented when
* they complete:
@@ -65,14 +65,14 @@
* operations and the primary finishes.</li>
* </ul>
*/
final private AtomicInteger pendingShards = new AtomicInteger();
final private AtomicInteger successfulShards = new AtomicInteger();
final private boolean executeOnReplicas;
final private boolean checkWriteConsistency;
final private Primary<Request, ReplicaRequest, PrimaryResultT> primary;
final private Replicas<ReplicaRequest> replicasProxy;
final private AtomicBoolean finished = new AtomicBoolean();
final protected ActionListener<PrimaryResultT> resultListener;
private final AtomicInteger pendingShards = new AtomicInteger();
private final AtomicInteger successfulShards = new AtomicInteger();
private final boolean executeOnReplicas;
private final boolean checkWriteConsistency;
private final Primary<Request, ReplicaRequest, PrimaryResultT> primary;
private final Replicas<ReplicaRequest> replicasProxy;
private final AtomicBoolean finished = new AtomicBoolean();
protected final ActionListener<PrimaryResultT> resultListener;
private volatile PrimaryResultT primaryResult = null;
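A hedged sketch (hypothetical helper methods, not the class's actual code) of the counting pattern the comment above describes, using the fields shown in this hunk:

    // Each in-flight sub-operation takes a slot in pendingShards; the listener
    // is notified exactly once, when the count returns to zero.
    private void onSubOperationStarted() {
        pendingShards.incrementAndGet();
    }

    private void onSubOperationFinished() {
        if (pendingShards.decrementAndGet() == 0 && finished.compareAndSet(false, true)) {
            resultListener.onResponse(primaryResult);
        }
    }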

@@ -124,9 +124,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
* @return the shardId of the shard this operation should be executed on;
* can be null if the shardId has not yet been resolved
*/
public
@Nullable
ShardId shardId() {
public ShardId shardId() {
return shardId;
}

@@ -40,7 +40,7 @@ import java.io.IOException;
*/
public class ReplicationResponse extends ActionResponse {
public final static ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0];
public static final ReplicationResponse.ShardInfo.Failure[] EMPTY = new ReplicationResponse.ShardInfo.Failure[0];
private ShardInfo shardInfo;
@@ -298,7 +298,6 @@ public class ReplicationResponse extends ActionResponse {
private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String PENDING = "pending";
private static final String FAILED = "failed";
private static final String FAILURES = "failures";

@@ -85,17 +85,17 @@ public abstract class TransportReplicationAction<
Response extends ReplicationResponse
> extends TransportAction<Request, Response> {
final protected TransportService transportService;
final protected ClusterService clusterService;
final protected IndicesService indicesService;
final private ShardStateAction shardStateAction;
final private WriteConsistencyLevel defaultWriteConsistencyLevel;
final private TransportRequestOptions transportOptions;
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final IndicesService indicesService;
private final ShardStateAction shardStateAction;
private final WriteConsistencyLevel defaultWriteConsistencyLevel;
private final TransportRequestOptions transportOptions;
// package private for testing
final String transportReplicaAction;
final String transportPrimaryAction;
final private ReplicasProxy replicasProxy;
private final ReplicasProxy replicasProxy;
protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,

@@ -205,7 +205,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
});
}
void retry(final @Nullable Throwable failure) {
void retry(@Nullable final Throwable failure) {
if (observer.isTimedOut()) {
// we are running as a last attempt after a timeout has happened. don't retry
Throwable listenFailure = failure;

@@ -130,7 +130,7 @@ public final class TermVectorsFields extends Fields {
* @param termVectors Stores the actual term vectors as a {@link BytesRef}.
*/
public TermVectorsFields(BytesReference headerRef, BytesReference termVectors) throws IOException {
StreamInput header = StreamInput.wrap(headerRef.toBytesArray());
StreamInput header = headerRef.streamInput();
fieldMap = new ObjectLongHashMap<>();
// here we read the header to fill the field offset map
String headerString = header.readString();
@@ -201,7 +201,7 @@
private int docCount;
public TermVector(BytesReference termVectors, long readOffset) throws IOException {
this.perFieldTermVectorInput = StreamInput.wrap(termVectors.toBytesArray());
this.perFieldTermVectorInput = termVectors.streamInput();
this.readOffset = readOffset;
reset();
}

@@ -27,6 +27,7 @@ import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -157,7 +158,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
this.id = other.id();
this.type = other.type();
if (this.doc != null) {
this.doc = other.doc().copyBytesArray();
this.doc = new BytesArray(other.doc().toBytesRef(), true);
}
this.flagsEnum = other.getFlags().clone();
this.preference = other.preference();
@@ -594,7 +595,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
} else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
} else if (currentFieldName.equals("filter")) {
termVectorsRequest.filterSettings(readFilterSettings(parser, termVectorsRequest));
termVectorsRequest.filterSettings(readFilterSettings(parser));
} else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
termVectorsRequest.index = parser.text();
} else if ("_type".equals(currentFieldName)) {
@@ -640,7 +641,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
return mapStrStr;
}
private static FilterSettings readFilterSettings(XContentParser parser, TermVectorsRequest termVectorsRequest) throws IOException {
private static FilterSettings readFilterSettings(XContentParser parser) throws IOException {
FilterSettings settings = new FilterSettings();
XContentParser.Token token;
String currentFieldName = null;

@@ -143,8 +143,8 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent {
public Fields getFields() throws IOException {
if (hasTermVectors() && isExists()) {
if (!sourceCopied) { // make the bytes safe
headerRef = headerRef.copyBytesArray();
termVectors = termVectors.copyBytesArray();
headerRef = new BytesArray(headerRef.toBytesRef(), true);
termVectors = new BytesArray(termVectors.toBytesRef(), true);
}
TermVectorsFields termVectorsFields = new TermVectorsFields(headerRef, termVectors);
hasScores = termVectorsFields.hasScores;

@@ -22,8 +22,6 @@ package org.elasticsearch.action.update;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
@@ -63,13 +61,11 @@ import java.util.Map;
public class UpdateHelper extends AbstractComponent {
private final ScriptService scriptService;
private final ClusterService clusterService;
@Inject
public UpdateHelper(Settings settings, ScriptService scriptService, ClusterService clusterService) {
public UpdateHelper(Settings settings, ScriptService scriptService) {
super(settings);
this.scriptService = scriptService;
this.clusterService = clusterService;
}
/**

@@ -508,7 +508,7 @@ final class BootstrapCheck {
}
static abstract class MightForkCheck implements BootstrapCheck.Check {
abstract static class MightForkCheck implements BootstrapCheck.Check {
@Override
public boolean check() {

@@ -47,8 +47,8 @@ final class JNAKernel32Library {
private List<NativeHandlerCallback> callbacks = new ArrayList<>();
// Native library instance must be kept around for the same reason.
private final static class Holder {
private final static JNAKernel32Library instance = new JNAKernel32Library();
private static final class Holder {
private static final JNAKernel32Library instance = new JNAKernel32Library();
}
private JNAKernel32Library() {

@@ -62,7 +62,7 @@ public class JavaVersion implements Comparable<JavaVersion> {
return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
}
private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));
private static final JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));
public static JavaVersion current() {
return CURRENT;

@@ -26,16 +26,16 @@ package org.elasticsearch.cluster;
public interface ClusterInfoService {
/** The latest cluster information */
public ClusterInfo getClusterInfo();
ClusterInfo getClusterInfo();
/** Add a listener that will be called every time new information is gathered */
public void addListener(Listener listener);
void addListener(Listener listener);
/**
* Interface for listeners to implement in order to perform actions when
* new information about the cluster has been gathered
*/
public interface Listener {
public void onNewInfo(ClusterInfo info);
interface Listener {
void onNewInfo(ClusterInfo info);
}
}
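A minimal hypothetical usage sketch of the trimmed-down interface:

    // Hypothetical: react to each ClusterInfo sample as it is gathered.
    clusterInfoService.addListener(new ClusterInfoService.Listener() {
        @Override
        public void onNewInfo(ClusterInfo info) {
            // inspect the freshly gathered disk usages / shard sizes here
        }
    });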

@@ -42,6 +42,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -114,7 +115,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
String type();
}
private final static Map<String, Custom> customPrototypes = new HashMap<>();
private static final Map<String, Custom> customPrototypes = new HashMap<>();
/**
* Register a custom index meta data factory. Make sure to call it from a static block.
@@ -702,7 +703,7 @@
public static byte[] toBytes(ClusterState state) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
state.writeTo(os);
return os.bytes().toBytes();
return BytesReference.toBytes(os.bytes());
}
/**
@@ -711,6 +712,7 @@
*/
public static ClusterState fromBytes(byte[] data, DiscoveryNode localNode) throws IOException {
return readFrom(StreamInput.wrap(data), localNode);
}
/**

@@ -274,7 +274,7 @@ public class ClusterStateObserver {
}
public static abstract class ValidationPredicate implements ChangePredicate {
public abstract static class ValidationPredicate implements ChangePredicate {
@Override
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
@@ -289,7 +289,7 @@
}
}
public static abstract class EventPredicate implements ChangePredicate {
public abstract static class EventPredicate implements ChangePredicate {
@Override
public boolean apply(ClusterState previousState, ClusterState.ClusterStateStatus previousStatus, ClusterState newState, ClusterState.ClusterStateStatus newStatus) {
return previousState != newState || previousStatus != newStatus;
@@ -298,8 +298,8 @@
}
static class ObservingContext {
final public Listener listener;
final public ChangePredicate changePredicate;
public final Listener listener;
public final ChangePredicate changePredicate;
public ObservingContext(Listener listener, ChangePredicate changePredicate) {
this.listener = listener;
@@ -308,8 +308,8 @@
}
static class ObservedState {
final public ClusterState clusterState;
final public ClusterState.ClusterStateStatus status;
public final ClusterState clusterState;
public final ClusterState.ClusterStateStatus status;
public ObservedState(ClusterState clusterState) {
this.clusterState = clusterState;
@@ -322,7 +322,7 @@
}
}
private final static class ContextPreservingListener implements Listener {
private static final class ContextPreservingListener implements Listener {
private final Listener delegate;
private final ThreadContext.StoredContext tempContext;

@@ -51,8 +51,8 @@ public interface ClusterStateTaskExecutor<T> {
* @param <T> the type of the cluster state update task
*/
class BatchResult<T> {
final public ClusterState resultingState;
final public Map<T, TaskResult> executionResults;
public final ClusterState resultingState;
public final Map<T, TaskResult> executionResults;
/**
* Construct an execution result instance with a correspondence between the tasks and their execution result

@@ -28,9 +28,9 @@ import java.util.List;
/**
* A task that can update the cluster state.
*/
abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor<ClusterStateUpdateTask>, ClusterStateTaskListener {
final private Priority priority;
private final Priority priority;
public ClusterStateUpdateTask() {
this(Priority.NORMAL);
@@ -41,7 +41,7 @@ abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig,
}
@Override
final public BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
public final BatchResult<ClusterStateUpdateTask> execute(ClusterState currentState, List<ClusterStateUpdateTask> tasks) throws Exception {
ClusterState result = execute(currentState);
return BatchResult.<ClusterStateUpdateTask>builder().successes(tasks).build(result);
}
@@ -50,12 +50,12 @@ abstract public class ClusterStateUpdateTask implements ClusterStateTaskConfig,
* Update the cluster state based on the current state. Return the *same instance* if no state
* should be changed.
*/
abstract public ClusterState execute(ClusterState currentState) throws Exception;
public abstract ClusterState execute(ClusterState currentState) throws Exception;
/**
* A callback called when execute fails.
*/
abstract public void onFailure(String source, Throwable t);
public abstract void onFailure(String source, Throwable t);
/**
* If the cluster state update task wasn't processed by the provided timeout, call

@@ -330,7 +330,7 @@ public final class DiffableUtils {
* @param <T> the type of map values
* @param <M> the map implementation type
*/
public static abstract class MapDiff<K, T, M> implements Diff<M> {
public abstract static class MapDiff<K, T, M> implements Diff<M> {
protected final List<K> deletes;
protected final Map<K, Diff<T>> diffs; // incremental updates
@@ -534,7 +534,7 @@
* @param <K> type of map keys
* @param <V> type of map values
*/
public static abstract class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
public abstract static class DiffableValueSerializer<K, V extends Diffable<V>> implements ValueSerializer<K, V> {
private static final DiffableValueSerializer WRITE_ONLY_INSTANCE = new DiffableValueSerializer() {
@Override
public Object read(StreamInput in, Object key) throws IOException {
@@ -577,7 +577,7 @@
* @param <K> type of map keys
* @param <V> type of map values
*/
public static abstract class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
public abstract static class NonDiffableValueSerializer<K, V> implements ValueSerializer<K, V> {
@Override
public boolean supportsDiffableValues() {
return false;

@@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings;
* ClusterInfoService that provides empty maps for disk usage and shard sizes
*/
public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService {
public final static EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService();
public static final EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService();
private EmptyClusterInfoService() {
super(Settings.EMPTY);

@@ -57,7 +57,7 @@ public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConne
// if a node doesn't appear in this list it shouldn't be monitored
private ConcurrentMap<DiscoveryNode, Integer> nodes = ConcurrentCollections.newConcurrentMap();
final private KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();
private final KeyedLock<DiscoveryNode> nodeLocks = new KeyedLock<>();
private final TimeValue reconnectInterval;

@@ -54,7 +54,7 @@ import java.util.function.BiFunction;
* tombstones remain in the cluster state for a fixed period of time, after which
* they are purged.
*/
final public class IndexGraveyard implements MetaData.Custom {
public final class IndexGraveyard implements MetaData.Custom {
/**
* Setting for the maximum tombstones allowed in the cluster state;
@@ -188,7 +188,7 @@ final public class IndexGraveyard implements MetaData.Custom {
/**
* A class to build an IndexGraveyard.
*/
final public static class Builder {
public static final class Builder {
private List<Tombstone> tombstones;
private int numPurged = -1;
private final long currentTime = System.currentTimeMillis();
@@ -273,7 +273,7 @@ final public class IndexGraveyard implements MetaData.Custom {
/**
* A class representing a diff of two IndexGraveyard objects.
*/
final public static class IndexGraveyardDiff implements Diff<MetaData.Custom> {
public static final class IndexGraveyardDiff implements Diff<MetaData.Custom> {
private final List<Tombstone> added;
private final int removedCount;
@@ -354,7 +354,7 @@ final public class IndexGraveyard implements MetaData.Custom {
/**
* An individual tombstone entry for representing a deleted index.
*/
final public static class Tombstone implements ToXContent, Writeable {
public static final class Tombstone implements ToXContent, Writeable {
private static final String INDEX_KEY = "index";
private static final String DELETE_DATE_IN_MILLIS_KEY = "delete_date_in_millis";
@@ -449,7 +449,7 @@ final public class IndexGraveyard implements MetaData.Custom {
/**
* A builder for building tombstone entries.
*/
final private static class Builder {
private static final class Builder {
private Index index;
private long deleteDateInMillis = -1L;

@@ -255,7 +255,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
private final ImmutableOpenIntMap<Set<String>> activeAllocationIds;
private transient final int totalNumberOfShards;
private final transient int totalNumberOfShards;
private final DiscoveryNodeFilters requireFilters;
private final DiscoveryNodeFilters includeFilters;

@@ -488,7 +488,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
return false;
}
final static class Context {
static final class Context {
private final ClusterState state;
private final IndicesOptions options;
@@ -551,7 +551,7 @@
/**
* Resolves alias/index name expressions with wildcards into the corresponding concrete indices/aliases
*/
final static class WildcardExpressionResolver implements ExpressionResolver {
static final class WildcardExpressionResolver implements ExpressionResolver {
@Override
public List<String> resolve(Context context, List<String> expressions) {
@@ -738,7 +738,7 @@
}
}
final static class DateMathExpressionResolver implements ExpressionResolver {
static final class DateMathExpressionResolver implements ExpressionResolver {
private static final String EXPRESSION_LEFT_BOUND = "<";
private static final String EXPRESSION_RIGHT_BOUND = ">";

@@ -32,7 +32,7 @@ public interface IndexTemplateFilter {
*/
boolean apply(CreateIndexClusterStateUpdateRequest request, IndexTemplateMetaData template);
static class Compound implements IndexTemplateFilter {
class Compound implements IndexTemplateFilter {
private IndexTemplateFilter[] filters;

@@ -19,11 +19,8 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.Version;
import org.elasticsearch.action.TimestampParsingException;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -34,11 +31,9 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenientNodeBooleanValue;
@@ -82,8 +77,6 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
public static class Timestamp {
private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis");
public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
try {
return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));

@@ -1187,7 +1187,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
}
}
private final static ToXContent.Params FORMAT_PARAMS;
private static final ToXContent.Params FORMAT_PARAMS;
static {
Map<String, String> params = new HashMap<>(2);
params.put("binary", "true");
@@ -1198,7 +1198,7 @@
/**
* State format for {@link MetaData} to write to and load from disk
*/
public final static MetaDataStateFormat<MetaData> FORMAT = new MetaDataStateFormat<MetaData>(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) {
public static final MetaDataStateFormat<MetaData> FORMAT = new MetaDataStateFormat<MetaData>(XContentType.SMILE, GLOBAL_STATE_FILE_PREFIX) {
@Override
public void toXContent(XContentBuilder builder, MetaData state) throws IOException {

@@ -97,7 +97,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_C
*/
public class MetaDataCreateIndexService extends AbstractComponent {
public final static int MAX_INDEX_NAME_BYTES = 255;
public static final int MAX_INDEX_NAME_BYTES = 255;
private static final DefaultIndexTemplateFilter DEFAULT_INDEX_TEMPLATE_FILTER = new DefaultIndexTemplateFilter();
private final ClusterService clusterService;

@@ -35,8 +35,8 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
@@ -160,7 +160,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
throw new IndexTemplateAlreadyExistsException(request.name);
}
validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider, metaDataCreateIndexService);
validateAndAddTemplate(request, templateBuilder, indicesService, nodeServicesProvider);
for (Alias alias : request.aliases) {
AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter())
@@ -185,7 +185,7 @@
}
private static void validateAndAddTemplate(final PutRequest request, IndexTemplateMetaData.Builder templateBuilder, IndicesService indicesService,
NodeServicesProvider nodeServicesProvider, MetaDataCreateIndexService metaDataCreateIndexService) throws Exception {
NodeServicesProvider nodeServicesProvider) throws Exception {
Index createdIndex = null;
final String temporaryIndexName = UUIDs.randomBase64UUID();
try {
@@ -276,7 +276,7 @@
}
}
public static interface PutListener {
public interface PutListener {
void onResponse(PutResponse response);
@@ -391,7 +391,7 @@
}
}
public static interface RemoveListener {
public interface RemoveListener {
void onResponse(RemoveResponse response);

@@ -60,7 +60,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
final List<ShardRouting> shards;
final List<ShardRouting> activeShards;
final List<ShardRouting> assignedShards;
final static List<ShardRouting> NO_SHARDS = Collections.emptyList();
static final List<ShardRouting> NO_SHARDS = Collections.emptyList();
final boolean allShardsStarted;
private volatile Map<AttributesKey, AttributesRoutings> activeShardsByAttributes = emptyMap();

@@ -229,11 +229,4 @@ public class OperationRouting extends AbstractComponent {
// of original index to hash documents
return Math.floorMod(hash, indexMetaData.getRoutingNumShards()) / indexMetaData.getRoutingFactor();
}
private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
if (!nodes.getDataNodes().keys().contains(nodeId)) {
throw new IllegalArgumentException("No data node with id[" + nodeId + "] found");
}
}
}
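A small worked example of the routing arithmetic above, with assumed shard counts:

    // Assumed numbers for illustration: an index partitioned into 8 routing
    // shards over 2 actual shards has a routing factor of 8 / 2 = 4.
    int hash = -13;                          // hash of the routing key
    int partition = Math.floorMod(hash, 8);  // floorMod(-13, 8) == 3
    int shardId = partition / 4;             // 3 / 4 == 0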

@@ -80,7 +80,8 @@ public class RoutingNode implements Iterable<ShardRouting> {
return this.node;
}
public @Nullable ShardRouting getByShardId(ShardId id) {
@Nullable
public ShardRouting getByShardId(ShardId id) {
return shards.get(id);
}

@@ -62,7 +62,7 @@ public abstract class AbstractAllocateAllocationCommand implements AllocationCom
/**
* Works around ObjectParser not supporting constructor arguments.
*/
protected static abstract class Builder<T extends AbstractAllocateAllocationCommand> {
protected abstract static class Builder<T extends AbstractAllocateAllocationCommand> {
protected String index;
protected int shard = -1;
protected String node;

@@ -71,7 +71,7 @@ public abstract class BasePrimaryAllocationCommand extends AbstractAllocateAlloc
return acceptDataLoss;
}
protected static abstract class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
protected abstract static class Builder<T extends BasePrimaryAllocationCommand> extends AbstractAllocateAllocationCommand.Builder<T> {
protected boolean acceptDataLoss;
public void setAcceptDataLoss(boolean acceptDataLoss) {

@@ -505,7 +505,6 @@ public class DiskThresholdDecider extends AllocationDecider {
}
private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
ClusterInfo clusterInfo = allocation.clusterInfo();
DiskUsage usage = usages.get(node.nodeId());
if (usage == null) {
// If there is no usage, and we have other nodes in the cluster,

@@ -154,17 +154,17 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
}
synchronized public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
public synchronized void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
clusterStatePublisher = publisher;
}
synchronized public void setLocalNode(DiscoveryNode localNode) {
public synchronized void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().getLocalNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}
synchronized public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
assert this.nodeConnectionsService == null : "nodeConnectionsService is already set";
this.nodeConnectionsService = nodeConnectionsService;
}
@@ -172,7 +172,7 @@
/**
* Adds an initial block to be set on the first cluster state created.
*/
synchronized public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
public synchronized void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started");
}
@@ -182,14 +182,14 @@
/**
* Remove an initial block to be set on the first cluster state created.
*/
synchronized public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
public synchronized void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
removeInitialStateBlock(block.id());
}
/**
* Remove an initial block to be set on the first cluster state created.
*/
synchronized public void removeInitialStateBlock(int blockId) throws IllegalStateException {
public synchronized void removeInitialStateBlock(int blockId) throws IllegalStateException {
if (lifecycle.started()) {
throw new IllegalStateException("can't set initial block when started");
}
@@ -197,7 +197,7 @@
}
@Override
synchronized protected void doStart() {
protected synchronized void doStart() {
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
Objects.requireNonNull(clusterState.nodes().getLocalNode(), "please set the local node before starting");
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
@@ -209,7 +209,7 @@
}
@Override
synchronized protected void doStop() {
protected synchronized void doStop() {
for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
onGoingTimeout.cancel();
try {
@@ -230,7 +230,7 @@
}
@Override
synchronized protected void doClose() {
protected synchronized void doClose() {
}
/**
@@ -497,7 +497,7 @@
return clusterName;
}
static abstract class SourcePrioritizedRunnable extends PrioritizedRunnable {
abstract static class SourcePrioritizedRunnable extends PrioritizedRunnable {
protected final String source;
public SourcePrioritizedRunnable(Priority priority, String source) {
@@ -959,7 +959,7 @@
private static class DelegetingAckListener implements Discovery.AckListener {
final private List<Discovery.AckListener> listeners;
private final List<Discovery.AckListener> listeners;
private DelegetingAckListener(List<Discovery.AckListener> listeners) {
this.listeners = listeners;

@@ -23,5 +23,5 @@ package org.elasticsearch.common;
* Generates opaque unique strings.
*/
interface UUIDGenerator {
public String getBase64UUID();
String getBase64UUID();
}

@@ -85,7 +85,7 @@ public class FsBlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
Path blobPath = path.resolve(blobName);
Files.delete(blobPath);
Files.deleteIfExists(blobPath);
}
@Override

@@ -27,13 +27,12 @@ import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
public final class BytesArray implements BytesReference {
public final class BytesArray extends BytesReference {
public static final BytesArray EMPTY = new BytesArray(BytesRef.EMPTY_BYTES, 0, 0);
private byte[] bytes;
private int offset;
private int length;
private final byte[] bytes;
private final int offset;
private final int length;
public BytesArray(String bytes) {
BytesRef bytesRef = new BytesRef(bytes);
@@ -89,74 +88,21 @@
return new BytesArray(bytes, offset + from, length);
}
@Override
public StreamInput streamInput() {
return StreamInput.wrap(bytes, offset, length);
}
@Override
public void writeTo(OutputStream os) throws IOException {
os.write(bytes, offset, length);
}
@Override
public byte[] toBytes() {
if (offset == 0 && bytes.length == length) {
return bytes;
}
return Arrays.copyOfRange(bytes, offset, offset + length);
}
@Override
public BytesArray toBytesArray() {
return this;
}
@Override
public BytesArray copyBytesArray() {
return new BytesArray(Arrays.copyOfRange(bytes, offset, offset + length));
}
@Override
public boolean hasArray() {
return true;
}
@Override
public byte[] array() {
return bytes;
}
@Override
public int arrayOffset() {
public int offset() {
return offset;
}
@Override
public String toUtf8() {
if (length == 0) {
return "";
}
return new String(bytes, offset, length, StandardCharsets.UTF_8);
}
@Override
public BytesRef toBytesRef() {
return new BytesRef(bytes, offset, length);
}
@Override
public BytesRef copyBytesRef() {
return new BytesRef(Arrays.copyOfRange(bytes, offset, offset + length));
}
@Override
public int hashCode() {
return Helper.bytesHashCode(this);
}
@Override
public boolean equals(Object obj) {
return Helper.bytesEqual(this, (BytesReference) obj);
public long ramBytesUsed() {
return bytes.length;
}
}

@@ -18,147 +18,74 @@
*/
package org.elasticsearch.common.bytes;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
import java.io.OutputStream;
import java.util.function.ToIntBiFunction;
/**
* A reference to bytes.
*/
public interface BytesReference {
public abstract class BytesReference implements Accountable, Comparable<BytesReference> {
class Helper {
public static boolean bytesEqual(BytesReference a, BytesReference b) {
if (a == b) {
return true;
}
if (a.length() != b.length()) {
return false;
}
return bytesEquals(a, b);
}
// pkg-private for testing
static boolean bytesEquals(BytesReference a, BytesReference b) {
assert a.length() == b.length();
for (int i = 0, end = a.length(); i < end; ++i) {
if (a.get(i) != b.get(i)) {
return false;
}
}
return true;
}
public static int bytesHashCode(BytesReference a) {
if (a.hasArray()) {
return hashCode(a.array(), a.arrayOffset(), a.length());
} else {
return slowHashCode(a);
}
}
// pkg-private for testing
static int hashCode(byte[] array, int offset, int length) {
int result = 1;
for (int i = offset, end = offset + length; i < end; ++i) {
result = 31 * result + array[i];
}
return result;
}
// pkg-private for testing
static int slowHashCode(BytesReference a) {
int result = 1;
for (int i = 0, end = a.length(); i < end; ++i) {
result = 31 * result + a.get(i);
}
return result;
}
}
private Integer hash = null; // we cache the hash of this reference since it can be quite costly to recalculate it
/**
* Returns the byte at the specified index. The index must be between 0 and length.
*/
byte get(int index);
public abstract byte get(int index);
/**
* The length.
*/
int length();
public abstract int length();
/**
* Slice the bytes from the <tt>from</tt> index up to <tt>length</tt>.
*/
BytesReference slice(int from, int length);
public abstract BytesReference slice(int from, int length);
/**
* A stream input of the bytes.
*/
StreamInput streamInput();
public StreamInput streamInput() {
BytesRef ref = toBytesRef();
return StreamInput.wrap(ref.bytes, ref.offset, ref.length);
}
/**
* Writes the bytes directly to the output stream.
*/
void writeTo(OutputStream os) throws IOException;
public void writeTo(OutputStream os) throws IOException {
final BytesRefIterator iterator = iterator();
BytesRef ref;
while ((ref = iterator.next()) != null) {
os.write(ref.bytes, ref.offset, ref.length);
}
}
/**
* Returns the bytes as a single byte array.
* Interprets the referenced bytes as UTF8 bytes, returning the resulting string
*/
byte[] toBytes();
/**
* Returns the bytes as a byte array, possibly sharing the underlying byte buffer.
*/
BytesArray toBytesArray();
/**
* Returns the bytes copied over as a byte array.
*/
BytesArray copyBytesArray();
/**
* Is there an underlying byte array for this bytes reference.
*/
boolean hasArray();
/**
* The underlying byte array (if exists).
*/
byte[] array();
/**
* The offset into the underlying byte array.
*/
int arrayOffset();
/**
* Converts to a string based on utf8.
*/
String toUtf8();
public String utf8ToString() {
return toBytesRef().utf8ToString();
}
/**
* Converts to Lucene BytesRef.
*/
BytesRef toBytesRef();
/**
* Converts to a copied Lucene BytesRef.
*/
BytesRef copyBytesRef();
public abstract BytesRef toBytesRef();
/**
* Returns a BytesRefIterator for this BytesReference. This method allows
* access to the internal pages of this reference without copying them. Use with care!
* @see BytesRefIterator
*/
default BytesRefIterator iterator() {
public BytesRefIterator iterator() {
return new BytesRefIterator() {
BytesRef ref = length() == 0 ? null : toBytesRef();
@Override
@@ -170,4 +97,115 @@ public interface BytesReference {
};
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other instanceof BytesReference) {
final BytesReference otherRef = (BytesReference) other;
if (length() != otherRef.length()) {
return false;
}
return compareIterators(this, otherRef, (a, b) ->
a.bytesEquals(b) ? 0 : 1 // this is a call to BytesRef#bytesEquals - this method is the hot one in the comparison
) == 0;
}
return false;
}
@Override
public int hashCode() {
if (hash == null) {
final BytesRefIterator iterator = iterator();
BytesRef ref;
int result = 1;
try {
while ((ref = iterator.next()) != null) {
for (int i = 0; i < ref.length; i++) {
result = 31 * result + ref.bytes[ref.offset + i];
}
}
} catch (IOException ex) {
throw new AssertionError("won't happen", ex);
}
return hash = result;
} else {
return hash.intValue();
}
}
/**
* Returns a compact array from the given BytesReference. The returned array won't be copied unless necessary. If you need
* to modify the returned array use <tt>BytesRef.deepCopyOf(reference.toBytesRef())</tt> instead
*/
public static byte[] toBytes(BytesReference reference) {
final BytesRef bytesRef = reference.toBytesRef();
if (bytesRef.offset == 0 && bytesRef.length == bytesRef.bytes.length) {
return bytesRef.bytes;
}
return BytesRef.deepCopyOf(bytesRef).bytes;
}
@Override
public int compareTo(final BytesReference other) {
return compareIterators(this, other, (a, b) -> a.compareTo(b));
}
/**
* Compares the two references using the given int function.
*/
private static final int compareIterators(final BytesReference a, final BytesReference b, final ToIntBiFunction<BytesRef, BytesRef> f) {
try {
// we use the iterators since it's a 0-copy comparison where possible!
final long lengthToCompare = Math.min(a.length(), b.length());
final BytesRefIterator aIter = a.iterator();
final BytesRefIterator bIter = b.iterator();
BytesRef aRef = aIter.next();
BytesRef bRef = bIter.next();
if (aRef != null && bRef != null) { // do we have any data?
aRef = aRef.clone(); // we clone since we modify the offsets and length in the iteration below
bRef = bRef.clone();
if (aRef.length == a.length() && bRef.length == b.length()) { // is it only one array slice we are comparing?
return f.applyAsInt(aRef, bRef);
} else {
for (int i = 0; i < lengthToCompare;) {
if (aRef.length == 0) {
aRef = aIter.next().clone(); // must be non null otherwise we have a bug
}
if (bRef.length == 0) {
bRef = bIter.next().clone(); // must be non null otherwise we have a bug
}
final int aLength = aRef.length;
final int bLength = bRef.length;
final int length = Math.min(aLength, bLength); // shrink to the same length and use the fast compare in lucene
aRef.length = bRef.length = length;
// now we move to the fast comparison - this is the hot part of the loop
int diff = f.applyAsInt(aRef, bRef);
aRef.length = aLength;
bRef.length = bLength;
if (diff != 0) {
return diff;
}
advance(aRef, length);
advance(bRef, length);
i += length;
}
}
}
// One is a prefix of the other, or, they are equal:
return a.length() - b.length();
} catch (IOException ex) {
throw new AssertionError("cannot happen", ex);
}
}
private static final void advance(final BytesRef ref, final int length) {
assert ref.length >= length : " ref.length: " + ref.length + " length: " + length;
assert ref.offset+length < ref.bytes.length || (ref.offset+length == ref.bytes.length && ref.length-length == 0)
: "offset: " + ref.offset + " ref.bytes.length: " + ref.bytes.length + " length: " + length + " ref.length: " + ref.length;
ref.length -= length;
ref.offset += length;
}
}
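Equality, hashing and ordering are now defined once on the abstract base in terms of the page iterator, so different implementations compare consistently without materializing arrays. A sketch of the resulting contract, reusing the BytesArray shown earlier:
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;

public class BytesReferenceSketch {
    public static void main(String[] args) {
        BytesReference a = new BytesArray("elastic");
        BytesReference b = new BytesArray("elasticsearch").slice(0, 7);
        assert a.equals(b);                  // same bytes, regardless of implementation
        assert a.hashCode() == b.hashCode(); // computed once, then cached
        assert a.compareTo(new BytesArray("elf")) < 0; // unsigned byte-wise order

        byte[] compact = BytesReference.toBytes(a); // copies only for slices or paged refs
        assert compact.length == 7;
    }
}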


@@ -21,7 +21,6 @@ package org.elasticsearch.common.bytes;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
@@ -35,7 +34,7 @@ import java.util.Arrays;
* A page based bytes reference, internally holding the bytes in a paged
* data structure.
*/
public class PagedBytesReference implements BytesReference {
public class PagedBytesReference extends BytesReference {
private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
@@ -80,119 +79,6 @@ public class PagedBytesReference implements BytesReference {
return new PagedBytesReferenceStreamInput(bytearray, offset, length);
}
@Override
public void writeTo(OutputStream os) throws IOException {
// nothing to do
if (length == 0) {
return;
}
BytesRef ref = new BytesRef();
int written = 0;
// are we a slice?
if (offset != 0) {
// remaining size of page fragment at offset
int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE));
bytearray.get(offset, fragmentSize, ref);
os.write(ref.bytes, ref.offset, fragmentSize);
written += fragmentSize;
}
// handle remainder of pages + trailing fragment
while (written < length) {
int remaining = length - written;
int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining;
bytearray.get(offset + written, bulkSize, ref);
os.write(ref.bytes, ref.offset, bulkSize);
written += bulkSize;
}
}
@Override
public byte[] toBytes() {
if (length == 0) {
return BytesRef.EMPTY_BYTES;
}
BytesRef ref = new BytesRef();
bytearray.get(offset, length, ref);
// undo the single-page optimization by ByteArray.get(), otherwise
// a materialized stream will contain trailing garbage/zeros
byte[] result = ref.bytes;
if (result.length != length || ref.offset != 0) {
result = Arrays.copyOfRange(result, ref.offset, ref.offset + length);
}
return result;
}
@Override
public BytesArray toBytesArray() {
BytesRef ref = new BytesRef();
bytearray.get(offset, length, ref);
return new BytesArray(ref);
}
@Override
public BytesArray copyBytesArray() {
BytesRef ref = new BytesRef();
boolean copied = bytearray.get(offset, length, ref);
if (copied) {
// BigArray has materialized for us, no need to do it again
return new BytesArray(ref.bytes, ref.offset, ref.length);
} else {
// here we need to copy the bytes even when shared
byte[] copy = Arrays.copyOfRange(ref.bytes, ref.offset, ref.offset + ref.length);
return new BytesArray(copy);
}
}
@Override
public boolean hasArray() {
return (offset + length <= PAGE_SIZE);
}
@Override
public byte[] array() {
if (hasArray()) {
if (length == 0) {
return BytesRef.EMPTY_BYTES;
}
BytesRef ref = new BytesRef();
bytearray.get(offset, length, ref);
return ref.bytes;
}
throw new IllegalStateException("array not available");
}
@Override
public int arrayOffset() {
if (hasArray()) {
BytesRef ref = new BytesRef();
bytearray.get(offset, length, ref);
return ref.offset;
}
throw new IllegalStateException("array not available");
}
@Override
public String toUtf8() {
if (length() == 0) {
return "";
}
byte[] bytes = toBytes();
final CharsRefBuilder ref = new CharsRefBuilder();
ref.copyUTF8Bytes(bytes, offset, length);
return ref.toString();
}
@Override
public BytesRef toBytesRef() {
BytesRef bref = new BytesRef();
@@ -201,61 +87,6 @@ public class PagedBytesReference implements BytesReference {
return bref;
}
@Override
public BytesRef copyBytesRef() {
byte[] bytes = toBytes();
return new BytesRef(bytes, offset, length);
}
@Override
public int hashCode() {
if (hash == 0) {
// TODO: delegate to BigArrays via:
// hash = bigarrays.hashCode(bytearray);
// and for slices:
// hash = bigarrays.hashCode(bytearray, offset, length);
int tmphash = 1;
for (int i = 0; i < length; i++) {
tmphash = 31 * tmphash + bytearray.get(offset + i);
}
hash = tmphash;
}
return hash;
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null) {
return false;
}
if (!(obj instanceof PagedBytesReference)) {
return BytesReference.Helper.bytesEqual(this, (BytesReference) obj);
}
PagedBytesReference other = (PagedBytesReference) obj;
if (length != other.length) {
return false;
}
// TODO: delegate to BigArrays via:
// return bigarrays.equals(bytearray, other.bytearray);
// and for slices:
// return bigarrays.equals(bytearray, start, other.bytearray, otherstart, len);
ByteArray otherArray = other.bytearray;
int otherOffset = other.offset;
for (int i = 0; i < length; i++) {
if (bytearray.get(offset + i) != otherArray.get(otherOffset + i)) {
return false;
}
}
return true;
}
private static class PagedBytesReferenceStreamInput extends StreamInput {
private final ByteArray bytearray;
@@ -390,4 +221,9 @@ public class PagedBytesReference implements BytesReference {
}
};
}
@Override
public long ramBytesUsed() {
return bytearray.ramBytesUsed();
}
}
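The paged implementation now only supplies get, length, slice, the stream, the iterator and its memory accounting. Such references typically originate from a BytesStreamOutput backed by BigArrays pages; a sketch under that assumption:
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

public class PagedSketch {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        for (int i = 0; i < 100_000; i++) {
            out.writeByte((byte) i);           // spills across multiple pages
        }
        BytesReference paged = out.bytes();    // may be a PagedBytesReference
        assert paged.length() == 100_000;
        byte[] flat = BytesReference.toBytes(paged);   // materializes across pages
        assert flat.length == 100_000;
        assert paged.ramBytesUsed() >= paged.length(); // whole pages are accounted
    }
}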


@@ -75,7 +75,7 @@ public final class CopyOnWriteHashMap<K, V> extends AbstractMap<K, V> {
/**
* Abstraction of a node, implemented by both inner and leaf nodes.
*/
private static abstract class Node<K, V> {
private abstract static class Node<K, V> {
/**
* Recursively get the key with the given hash.


@@ -132,8 +132,8 @@ public final class HppcMaps {
};
}
public final static class Object {
public final static class Integer {
public static final class Object {
public static final class Integer {
public static <V> ObjectIntHashMap<V> ensureNoNullKeys(int capacity, float loadFactor) {
return new ObjectIntHashMap<V>(capacity, loadFactor) {
@Override


@@ -88,7 +88,7 @@ public final class CompressedXContent {
xcontent.toXContent(builder, params);
builder.endObject();
}
this.bytes = bStream.bytes().toBytes();
this.bytes = BytesReference.toBytes(bStream.bytes());
this.crc32 = (int) crc32.getValue();
assertConsistent();
}
@@ -101,14 +101,14 @@
Compressor compressor = CompressorFactory.compressor(data);
if (compressor != null) {
// already compressed...
this.bytes = data.toBytes();
this.bytes = BytesReference.toBytes(data);
this.crc32 = crc32(new BytesArray(uncompressed()));
} else {
BytesStreamOutput out = new BytesStreamOutput();
try (OutputStream compressedOutput = CompressorFactory.COMPRESSOR.streamOutput(out)) {
data.writeTo(compressedOutput);
}
this.bytes = out.bytes().toBytes();
this.bytes = BytesReference.toBytes(out.bytes());
this.crc32 = crc32(data);
}
assertConsistent();
@@ -140,7 +140,7 @@
/** Return the uncompressed bytes. */
public byte[] uncompressed() {
try {
return CompressorFactory.uncompress(new BytesArray(bytes)).toBytes();
return BytesReference.toBytes(CompressorFactory.uncompress(new BytesArray(bytes)));
} catch (IOException e) {
throw new IllegalStateException("Cannot decompress compressed string", e);
}
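The recurring migration in this file swaps the removed instance method for the static helper, which still avoids copying when the reference spans a whole array. In isolation:
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

public class ToBytesMigration {
    public static void main(String[] args) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeString("some payload");
        // before: byte[] raw = out.bytes().toBytes();
        byte[] raw = BytesReference.toBytes(out.bytes());
        assert raw.length > 0;
    }
}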


@@ -80,7 +80,7 @@ public enum GeoDistance implements Writeable {
@Override
public FixedSourceDistance fixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude, unit);
return new FactorFixedSourceDistance(sourceLatitude, sourceLongitude);
}
},
/**
@@ -217,12 +217,12 @@ public enum GeoDistance implements Writeable {
throw new IllegalArgumentException("No geo distance for [" + name + "]");
}
public static interface FixedSourceDistance {
public interface FixedSourceDistance {
double calculate(double targetLatitude, double targetLongitude);
}
public static interface DistanceBoundingCheck {
public interface DistanceBoundingCheck {
boolean isWithin(double targetLatitude, double targetLongitude);
@@ -331,7 +331,7 @@ public enum GeoDistance implements Writeable {
private final double sinA;
private final double cosA;
public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude, DistanceUnit unit) {
public FactorFixedSourceDistance(double sourceLatitude, double sourceLongitude) {
this.sourceLongitude = sourceLongitude;
this.a = Math.toRadians(90D - sourceLatitude);
this.sinA = Math.sin(a);
@@ -350,7 +350,7 @@ public enum GeoDistance implements Writeable {
* Basic implementation of {@link FixedSourceDistance}. This class keeps the basic parameters for a distance
* function based on a fixed source, namely latitude, longitude and unit.
*/
public static abstract class FixedSourceDistanceBase implements FixedSourceDistance {
public abstract static class FixedSourceDistanceBase implements FixedSourceDistance {
protected final double sourceLatitude;
protected final double sourceLongitude;
protected final DistanceUnit unit;
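The DistanceUnit parameter could be dropped because the factor is only a monotonic sort key rather than a distance in concrete units. Call sites through the enum are unchanged; a sketch with illustrative coordinates:
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.unit.DistanceUnit;

public class FixedSourceSketch {
    public static void main(String[] args) {
        GeoDistance.FixedSourceDistance fixed =
                GeoDistance.FACTOR.fixedSourceDistance(48.8566, 2.3522, DistanceUnit.METERS);
        // the returned values order targets by distance from the source,
        // but are not themselves distances in any unit
        double keyBerlin = fixed.calculate(52.5200, 13.4050);
        double keyMadrid = fixed.calculate(40.4168, -3.7038);
        System.out.println(keyBerlin + " vs " + keyMadrid);
    }
}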


@@ -195,7 +195,7 @@ public class GeoHashUtils {
* @param dy delta of the second grid coordinate (must be -1, 0 or +1)
* @return geohash of the defined cell
*/
public final static String neighbor(String geohash, int level, int dx, int dy) {
public static final String neighbor(String geohash, int level, int dx, int dy) {
int cell = BASE_32_STRING.indexOf(geohash.charAt(level -1));
// Decoding the Geohash bit pattern to determine grid coordinates


@@ -19,20 +19,19 @@
package org.elasticsearch.common.geo.builders;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory;
import com.vividsolutions.jts.geom.LinearRing;
import com.vividsolutions.jts.geom.MultiPolygon;
import com.vividsolutions.jts.geom.Polygon;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.locationtech.spatial4j.exception.InvalidShapeException;
import org.locationtech.spatial4j.shape.Shape;
import java.io.IOException;
import java.util.ArrayList;
@@ -579,7 +578,7 @@ public class PolygonBuilder extends ShapeBuilder {
boolean direction = (component == 0 ^ orientation == Orientation.RIGHT);
// set the points array accordingly (shell or hole)
Coordinate[] points = (hole != null) ? hole.coordinates(false) : shell.coordinates(false);
ring(component, direction, orientation == Orientation.LEFT, shell, points, 0, edges, offset, points.length-1, translated);
ring(component, direction, orientation == Orientation.LEFT, points, 0, edges, offset, points.length-1, translated);
return points.length-1;
}
@@ -594,7 +593,7 @@ public class PolygonBuilder extends ShapeBuilder {
* number of points
* @return Array of edges
*/
private static Edge[] ring(int component, boolean direction, boolean handedness, LineStringBuilder shell,
private static Edge[] ring(int component, boolean direction, boolean handedness,
Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
// calculate the direction of the points:
// find the point at the top of the set and check its


@@ -47,7 +47,7 @@ public interface Scope {
* when an instance of the requested object doesn't already exist in this
* scope
*/
public <T> Provider<T> scope(Key<T> key, Provider<T> unscoped);
<T> Provider<T> scope(Key<T> key, Provider<T> unscoped);
/**
* A short but useful description of this scope. For comparison, the standard


@@ -36,7 +36,7 @@ import static java.util.Collections.emptySet;
*/
interface State {
static final State NONE = new State() {
State NONE = new State() {
@Override
public State parent() {
throw new UnsupportedOperationException();


@@ -551,7 +551,7 @@ public final class Errors {
return root.errors == null ? 0 : root.errors.size();
}
private static abstract class Converter<T> {
private abstract static class Converter<T> {
final Class<T> type;


@@ -29,7 +29,7 @@ public interface InternalFactory<T> {
* ES:
* A factory that returns a pre-created instance.
*/
public static class Instance<T> implements InternalFactory<T> {
class Instance<T> implements InternalFactory<T> {
private final T object;


@@ -805,13 +805,6 @@ public abstract class StreamInput extends InputStream {
return builder;
}
public static StreamInput wrap(BytesReference reference) {
if (reference.hasArray() == false) {
reference = reference.toBytesArray();
}
return wrap(reference.array(), reference.arrayOffset(), reference.length());
}
public static StreamInput wrap(byte[] bytes) {
return wrap(bytes, 0, bytes.length);
}
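StreamInput.wrap(BytesReference) goes away because a reference can now produce its own stream via streamInput(), with zero copy for single-array references. The migration, sketched:
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;

public class StreamMigration {
    public static void main(String[] args) throws Exception {
        BytesReference ref = new BytesArray(new byte[] { 1, 2, 3, 4 });
        // before: StreamInput in = StreamInput.wrap(ref);
        StreamInput in = ref.streamInput();
        assert in.readByte() == 1;
    }
}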


@@ -408,7 +408,7 @@ public abstract class StreamOutput extends OutputStream {
void write(StreamOutput o, Object value) throws IOException;
}
private final static Map<Class<?>, Writer> WRITERS;
private static final Map<Class<?>, Writer> WRITERS;
static {
Map<Class<?>, Writer> writers = new HashMap<>();


@@ -38,7 +38,7 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
*/
public class Loggers {
private final static String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");
public static final String SPACE = " ";


@@ -245,14 +245,14 @@ public class Lucene {
/**
* Wraps <code>delegate</code> with a count-based early termination collector with a threshold of <code>maxCountHits</code>
*/
public final static EarlyTerminatingCollector wrapCountBasedEarlyTerminatingCollector(final Collector delegate, int maxCountHits) {
public static final EarlyTerminatingCollector wrapCountBasedEarlyTerminatingCollector(final Collector delegate, int maxCountHits) {
return new EarlyTerminatingCollector(delegate, maxCountHits);
}
/**
* Wraps <code>delegate</code> with a time-limited collector with a timeout of <code>timeoutInMillis</code>
*/
public final static TimeLimitingCollector wrapTimeLimitingCollector(final Collector delegate, final Counter counter, long timeoutInMillis) {
public static final TimeLimitingCollector wrapTimeLimitingCollector(final Collector delegate, final Counter counter, long timeoutInMillis) {
return new TimeLimitingCollector(delegate, counter, timeoutInMillis);
}
@@ -510,7 +510,7 @@ public class Lucene {
* This exception is thrown when {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminatingCollector}
* reaches early termination
*/
public final static class EarlyTerminationException extends ElasticsearchException {
public static final class EarlyTerminationException extends ElasticsearchException {
public EarlyTerminationException(String msg) {
super(msg);
@@ -525,7 +525,7 @@ public class Lucene {
* A collector that terminates early by throwing {@link org.elasticsearch.common.lucene.Lucene.EarlyTerminationException}
* when the count of matched documents has reached <code>maxCountHits</code>
*/
public final static class EarlyTerminatingCollector extends SimpleCollector {
public static final class EarlyTerminatingCollector extends SimpleCollector {
private final int maxCountHits;
private final Collector delegate;


@@ -22,7 +22,6 @@ package org.elasticsearch.common.lucene.all;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
@@ -38,12 +37,10 @@ public final class AllTokenStream extends TokenFilter {
}
private final BytesRef payloadSpare = new BytesRef(new byte[1]);
private final OffsetAttribute offsetAttribute;
private final PayloadAttribute payloadAttribute;
AllTokenStream(TokenStream input, float boost) {
super(input);
offsetAttribute = addAttribute(OffsetAttribute.class);
payloadAttribute = addAttribute(PayloadAttribute.class);
payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
}


@@ -66,7 +66,7 @@ public final class ElasticsearchDirectoryReader extends FilterDirectoryReader {
return new ElasticsearchDirectoryReader(reader, new SubReaderWrapper(shardId), shardId);
}
private final static class SubReaderWrapper extends FilterDirectoryReader.SubReaderWrapper {
private static final class SubReaderWrapper extends FilterDirectoryReader.SubReaderWrapper {
private final ShardId shardId;
SubReaderWrapper(ShardId shardId) {
this.shardId = shardId;


@@ -60,7 +60,7 @@ public class FilterableTermsEnum extends TermsEnum {
}
static final String UNSUPPORTED_MESSAGE = "This TermsEnum only supports #seekExact(BytesRef) as well as #docFreq() and #totalTermFreq()";
protected final static int NOT_FOUND = -1;
protected static final int NOT_FOUND = -1;
private final Holder[] enums;
protected int currentDocFreq = 0;
protected long currentTotalTermFreq = 0;


@@ -102,7 +102,7 @@ public class FiltersFunctionScoreQuery extends Query {
final float maxBoost;
private final Float minScore;
final protected CombineFunction combineFunction;
protected final CombineFunction combineFunction;
public FiltersFunctionScoreQuery(Query subQuery, ScoreMode scoreMode, FilterFunction[] filterFunctions, float maxBoost, Float minScore, CombineFunction combineFunction) {
this.subQuery = subQuery;


@@ -76,7 +76,7 @@ public abstract class Rounding implements Streamable {
*/
public static class Interval extends Rounding {
final static byte ID = 0;
static final byte ID = 0;
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
@@ -157,7 +157,7 @@ public abstract class Rounding implements Streamable {
public static class FactorRounding extends Rounding {
final static byte ID = 7;
static final byte ID = 7;
public static final ParseField FACTOR_FIELD = new ParseField("factor");
@@ -226,7 +226,7 @@ public abstract class Rounding implements Streamable {
public static class OffsetRounding extends Rounding {
final static byte ID = 8;
static final byte ID = 8;
public static final ParseField OFFSET_FIELD = new ParseField("offset");


@@ -195,7 +195,7 @@ public abstract class TimeZoneRounding extends Rounding {
static class TimeIntervalRounding extends TimeZoneRounding {
final static byte ID = 2;
static final byte ID = 2;
private long interval;
private DateTimeZone timeZone;


@@ -49,7 +49,6 @@ public final class SettingsFilter extends AbstractComponent {
public SettingsFilter(Settings settings, Collection<String> patterns) {
super(settings);
HashSet<String> set = new HashSet<>();
for (String pattern : patterns) {
if (isValidPattern(pattern) == false) {
throw new IllegalArgumentException("invalid pattern: " + pattern);


@@ -33,7 +33,7 @@ import java.util.Map;
*/
public interface SettingsLoader {
static class Helper {
class Helper {
public static Map<String, String> loadNestedFromMap(@Nullable Map map) {
Map<String, String> settings = new HashMap<>();


@@ -82,13 +82,7 @@ public final class Text implements Comparable<Text> {
* Returns a {@link String} view of the data.
*/
public String string() {
if (text == null) {
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
}
return text;
return text == null ? bytes.utf8ToString() : text;
}
@Override
@@ -114,6 +108,6 @@
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
return bytes().compareTo(text.bytes());
}
}
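With BytesReference itself Comparable and UTF-8 aware, Text drops both the array round-trip in string() and the hand-rolled comparator deleted below. The observable behavior, sketched:
import org.elasticsearch.common.text.Text;

public class TextSketch {
    public static void main(String[] args) {
        Text a = new Text("alpha");
        Text b = new Text("beta");
        assert a.compareTo(b) < 0;         // unsigned byte-wise UTF-8 order
        assert a.string().equals("alpha"); // decoded lazily via utf8ToString()
    }
}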


@@ -1,77 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import org.elasticsearch.common.bytes.BytesReference;
import java.util.Comparator;
// LUCENE 4 UPGRADE: Is this the right way of comparing bytesreferences inside Text instances?
// Copied from Lucene's BytesRef comparator
public class UTF8SortedAsUnicodeComparator implements Comparator<BytesReference> {
public final static Comparator<BytesReference> utf8SortedAsUnicodeSortOrder = new UTF8SortedAsUnicodeComparator();
// Only singleton
private UTF8SortedAsUnicodeComparator() {
}
@Override
public int compare(BytesReference a, BytesReference b) {
if (a.hasArray() && b.hasArray()) {
final byte[] aBytes = a.array();
int aUpto = a.arrayOffset();
final byte[] bBytes = b.array();
int bUpto = b.arrayOffset();
final int aStop = aUpto + Math.min(a.length(), b.length());
while (aUpto < aStop) {
int aByte = aBytes[aUpto++] & 0xff;
int bByte = bBytes[bUpto++] & 0xff;
int diff = aByte - bByte;
if (diff != 0) {
return diff;
}
}
// One is a prefix of the other, or, they are equal:
return a.length() - b.length();
} else {
final byte[] aBytes = a.toBytes();
int aUpto = 0;
final byte[] bBytes = b.toBytes();
int bUpto = 0;
final int aStop = aUpto + Math.min(a.length(), b.length());
while (aUpto < aStop) {
int aByte = aBytes[aUpto++] & 0xff;
int bByte = bBytes[bUpto++] & 0xff;
int diff = aByte - bByte;
if (diff != 0) {
return diff;
}
}
// One is a prefix of the other, or, they are equal:
return a.length() - b.length();
}
}
}


@@ -80,7 +80,7 @@ public class PortsRange {
return success;
}
public static interface PortCallback {
public interface PortCallback {
boolean onPortNumber(int portNumber);
}
}
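PortCallback is a single-method interface, so a lambda fits. A sketch assuming PortsRange#iterate(PortCallback), which the surrounding hunk's `return success;` suggests:
import org.elasticsearch.common.transport.PortsRange;

public class PortsSketch {
    public static void main(String[] args) {
        PortsRange range = new PortsRange("9300-9305");
        // returns whether some port in the range was handled successfully
        boolean bound = range.iterate(portNumber -> portNumber == 9301); // stand-in for a real bind attempt
        assert bound;
    }
}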


@@ -48,5 +48,5 @@ public interface TransportAddress extends Writeable {
boolean isLoopbackOrLinkLocalAddress();
public String toString();
String toString();
}


@@ -26,6 +26,6 @@ import org.elasticsearch.common.lease.Releasable;
public interface BigArray extends Releasable, Accountable {
/** Return the length of this array. */
public long size();
long size();
}

Some files were not shown because too many files have changed in this diff.