mirror of
https://github.com/honeymoose/OpenSearch.git
synced 2025-03-25 01:19:02 +00:00
Remove runtime version checks
This cleanup commit removes a large portion of the versioned reads / writes in the network protocol since master requires a full cluster restart.
This commit is contained in:
parent
f4052fd936
commit
219bb88bc2
@ -62,38 +62,29 @@ public class OriginalIndices implements IndicesRequest {
|
||||
}
|
||||
|
||||
public static OriginalIndices readOptionalOriginalIndices(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
boolean empty = in.readBoolean();
|
||||
if (!empty) {
|
||||
return new OriginalIndices(in.readStringArray(), IndicesOptions.readIndicesOptions(in));
|
||||
}
|
||||
}
|
||||
return OriginalIndices.EMPTY;
|
||||
}
|
||||
|
||||
public static void writeOptionalOriginalIndices(OriginalIndices originalIndices, StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
boolean empty = originalIndices == EMPTY;
|
||||
out.writeBoolean(empty);
|
||||
if (!empty) {
|
||||
out.writeStringArrayNullable(originalIndices.indices);
|
||||
originalIndices.indicesOptions.writeIndicesOptions(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static OriginalIndices readOriginalIndices(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
boolean empty = in.readBoolean();
|
||||
if (!empty) {
|
||||
return new OriginalIndices(in.readStringArray(), IndicesOptions.readIndicesOptions(in));
|
||||
}
|
||||
return OriginalIndices.EMPTY;
|
||||
}
|
||||
|
||||
|
||||
public static void writeOriginalIndices(OriginalIndices originalIndices, StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
public static void writeOptionalOriginalIndices(OriginalIndices originalIndices, StreamOutput out) throws IOException {
|
||||
boolean empty = originalIndices == EMPTY;
|
||||
out.writeBoolean(empty);
|
||||
if (!empty) {
|
||||
out.writeStringArrayNullable(originalIndices.indices);
|
||||
originalIndices.indicesOptions.writeIndicesOptions(out);
|
||||
}
|
||||
}
|
||||
|
||||
public static OriginalIndices readOriginalIndices(StreamInput in) throws IOException {
|
||||
return new OriginalIndices(in.readStringArray(), IndicesOptions.readIndicesOptions(in));
|
||||
}
|
||||
|
||||
|
||||
public static void writeOriginalIndices(OriginalIndices originalIndices, StreamOutput out) throws IOException {
|
||||
out.writeStringArrayNullable(originalIndices.indices);
|
||||
originalIndices.indicesOptions.writeIndicesOptions(out);
|
||||
}
|
||||
}
|
||||
|
@ -166,7 +166,6 @@ public class ClusterHealthRequest extends MasterNodeReadOperationRequest<Cluster
|
||||
waitForRelocatingShards = in.readInt();
|
||||
waitForActiveShards = in.readInt();
|
||||
waitForNodes = in.readString();
|
||||
readLocal(in);
|
||||
if (in.readBoolean()) {
|
||||
waitForEvents = Priority.readFrom(in);
|
||||
}
|
||||
@ -193,7 +192,6 @@ public class ClusterHealthRequest extends MasterNodeReadOperationRequest<Cluster
|
||||
out.writeInt(waitForRelocatingShards);
|
||||
out.writeInt(waitForActiveShards);
|
||||
out.writeString(waitForNodes);
|
||||
writeLocal(out);
|
||||
if (waitForEvents == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
|
@ -133,11 +133,7 @@ public class PluginInfo implements Streamable, Serializable, ToXContent {
|
||||
this.description = in.readString();
|
||||
this.site = in.readBoolean();
|
||||
this.jvm = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
|
||||
this.version = in.readString();
|
||||
} else {
|
||||
this.version = VERSION_NOT_AVAILABLE;
|
||||
}
|
||||
this.version = in.readString();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -146,9 +142,7 @@ public class PluginInfo implements Streamable, Serializable, ToXContent {
|
||||
out.writeString(description);
|
||||
out.writeBoolean(site);
|
||||
out.writeBoolean(jvm);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
|
||||
out.writeString(version);
|
||||
}
|
||||
out.writeString(version);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -218,17 +218,7 @@ public class NodeStats extends NodeOperationResponse implements ToXContent {
|
||||
if (in.readBoolean()) {
|
||||
http = HttpStats.readHttpStats(in);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
|
||||
} else {
|
||||
// If 1.3.0 or earlier, only a single CircuitBreakerStats can be read
|
||||
CircuitBreakerStats fdStats = CircuitBreakerStats.readOptionalCircuitBreakerStats(in);
|
||||
if (fdStats != null) {
|
||||
CircuitBreakerStats reqStats = new CircuitBreakerStats(CircuitBreaker.Name.REQUEST, 0, 0, 1.0, -1);
|
||||
CircuitBreakerStats parentStats = new CircuitBreakerStats(CircuitBreaker.Name.PARENT, 0, 0, 1.0, -1);
|
||||
breaker = new AllCircuitBreakerStats(new CircuitBreakerStats[] {parentStats, fdStats, reqStats});
|
||||
}
|
||||
}
|
||||
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
|
||||
|
||||
}
|
||||
|
||||
@ -290,12 +280,7 @@ public class NodeStats extends NodeOperationResponse implements ToXContent {
|
||||
out.writeBoolean(true);
|
||||
http.writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalStreamable(breaker);
|
||||
} else {
|
||||
// Writing to a 1.3.0 or earlier stream expects only a single breaker stats
|
||||
out.writeOptionalStreamable(breaker == null ? null : breaker.getStats(CircuitBreaker.Name.FIELDDATA));
|
||||
}
|
||||
out.writeOptionalStreamable(breaker);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -88,13 +88,11 @@ public class GetRepositoriesRequest extends MasterNodeReadOperationRequest<GetRe
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
repositories = in.readStringArray();
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(repositories);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -285,12 +285,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
|
||||
type = in.readString();
|
||||
settings = readSettingsFromStream(in);
|
||||
readTimeout(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
verify = in.readBoolean();
|
||||
} else {
|
||||
// we received this request from an older client that doesn't expect us to validate the request
|
||||
verify = false;
|
||||
}
|
||||
verify = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -300,8 +295,6 @@ public class PutRepositoryRequest extends AcknowledgedRequest<PutRepositoryReque
|
||||
out.writeString(type);
|
||||
writeSettingsToStream(settings, out);
|
||||
writeTimeout(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
out.writeBoolean(verify);
|
||||
}
|
||||
out.writeBoolean(verify);
|
||||
}
|
||||
}
|
||||
|
@ -126,11 +126,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
|
||||
super.readFrom(in);
|
||||
commands = AllocationCommands.readFrom(in);
|
||||
dryRun = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
explain = in.readBoolean();
|
||||
} else {
|
||||
explain = false;
|
||||
}
|
||||
explain = in.readBoolean();
|
||||
readTimeout(in);
|
||||
}
|
||||
|
||||
@ -139,9 +135,7 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
|
||||
super.writeTo(out);
|
||||
AllocationCommands.writeTo(commands, out);
|
||||
out.writeBoolean(dryRun);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBoolean(explain);
|
||||
}
|
||||
out.writeBoolean(explain);
|
||||
writeTimeout(out);
|
||||
}
|
||||
}
|
||||
|
@ -62,11 +62,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse {
|
||||
super.readFrom(in);
|
||||
state = ClusterState.Builder.readFrom(in, null, null);
|
||||
readAcknowledged(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
explanations = RoutingExplanations.readFrom(in);
|
||||
} else {
|
||||
explanations = new RoutingExplanations();
|
||||
}
|
||||
explanations = RoutingExplanations.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -74,8 +70,6 @@ public class ClusterRerouteResponse extends AcknowledgedResponse {
|
||||
super.writeTo(out);
|
||||
ClusterState.Builder.writeTo(state, out);
|
||||
writeAcknowledged(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
RoutingExplanations.writeTo(explanations, out);
|
||||
}
|
||||
RoutingExplanations.writeTo(explanations, out);
|
||||
}
|
||||
}
|
||||
|
@ -159,7 +159,6 @@ public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<C
|
||||
|
||||
types = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readLocal(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -176,7 +175,6 @@ public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest<C
|
||||
|
||||
out.writeStringArray(types);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeLocal(out);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -560,10 +560,8 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
|
||||
renameReplacement = in.readOptionalString();
|
||||
waitForCompletion = in.readBoolean();
|
||||
includeGlobalState = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
partial = in.readBoolean();
|
||||
includeAliases = in.readBoolean();
|
||||
}
|
||||
partial = in.readBoolean();
|
||||
includeAliases = in.readBoolean();
|
||||
settings = readSettingsFromStream(in);
|
||||
}
|
||||
|
||||
@ -578,10 +576,8 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest<RestoreSn
|
||||
out.writeOptionalString(renameReplacement);
|
||||
out.writeBoolean(waitForCompletion);
|
||||
out.writeBoolean(includeGlobalState);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
out.writeBoolean(partial);
|
||||
out.writeBoolean(includeAliases);
|
||||
}
|
||||
out.writeBoolean(partial);
|
||||
out.writeBoolean(includeAliases);
|
||||
writeSettingsToStream(settings, out);
|
||||
}
|
||||
}
|
||||
|
@ -127,11 +127,6 @@ public class ClusterStateRequest extends MasterNodeReadOperationRequest<ClusterS
|
||||
metaData = in.readBoolean();
|
||||
blocks = in.readBoolean();
|
||||
indices = in.readStringArray();
|
||||
// fake support for indices in pre 1.2.0 versions
|
||||
if (in.getVersion().before(Version.V_1_2_0)) {
|
||||
in.readStringArray();
|
||||
}
|
||||
readLocal(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -142,10 +137,5 @@ public class ClusterStateRequest extends MasterNodeReadOperationRequest<ClusterS
|
||||
out.writeBoolean(metaData);
|
||||
out.writeBoolean(blocks);
|
||||
out.writeStringArray(indices);
|
||||
// fake support for indices in pre 1.2.0 versions
|
||||
if (out.getVersion().before(Version.V_1_2_0)) {
|
||||
out.writeStringArray(Strings.EMPTY_ARRAY);
|
||||
}
|
||||
writeLocal(out);
|
||||
}
|
||||
}
|
||||
|
@ -36,15 +36,4 @@ public class PendingClusterTasksRequest extends MasterNodeReadOperationRequest<P
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -90,7 +90,6 @@ public class GetAliasesRequest extends MasterNodeReadOperationRequest<GetAliases
|
||||
indices = in.readStringArray();
|
||||
aliases = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -99,6 +98,5 @@ public class GetAliasesRequest extends MasterNodeReadOperationRequest<GetAliases
|
||||
out.writeStringArray(indices);
|
||||
out.writeStringArray(aliases);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -143,9 +143,7 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
|
||||
analyzer = in.readOptionalString();
|
||||
tokenizer = in.readOptionalString();
|
||||
tokenFilters = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
charFilters = in.readStringArray();
|
||||
}
|
||||
charFilters = in.readStringArray();
|
||||
field = in.readOptionalString();
|
||||
}
|
||||
|
||||
@ -156,9 +154,7 @@ public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest>
|
||||
out.writeOptionalString(analyzer);
|
||||
out.writeOptionalString(tokenizer);
|
||||
out.writeStringArray(tokenFilters);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeStringArray(charFilters);
|
||||
}
|
||||
out.writeStringArray(charFilters);
|
||||
out.writeOptionalString(field);
|
||||
}
|
||||
}
|
||||
|
@ -118,9 +118,7 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
|
||||
recycler = in.readBoolean();
|
||||
fields = in.readStringArray();
|
||||
filterKeys = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readBoolean();
|
||||
}
|
||||
queryCache = in.readBoolean();
|
||||
}
|
||||
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
@ -131,8 +129,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
|
||||
out.writeBoolean(recycler);
|
||||
out.writeStringArrayNullable(fields);
|
||||
out.writeStringArrayNullable(filterKeys);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(queryCache);
|
||||
}
|
||||
out.writeBoolean(queryCache);
|
||||
}
|
||||
}
|
||||
|
@ -97,9 +97,7 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
|
||||
recycler = in.readBoolean();
|
||||
fields = in.readStringArray();
|
||||
filterKeys = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readBoolean();
|
||||
}
|
||||
queryCache = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -111,8 +109,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
|
||||
out.writeBoolean(recycler);
|
||||
out.writeStringArrayNullable(fields);
|
||||
out.writeStringArrayNullable(filterKeys);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(queryCache);
|
||||
}
|
||||
out.writeBoolean(queryCache);
|
||||
}
|
||||
}
|
||||
|
@ -461,11 +461,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
||||
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
|
||||
customs.put(type, customIndexMetaData);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
aliases.add(Alias.read(in));
|
||||
}
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
aliases.add(Alias.read(in));
|
||||
}
|
||||
}
|
||||
|
||||
@ -486,11 +484,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
||||
out.writeString(entry.getKey());
|
||||
IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeVInt(aliases.size());
|
||||
for (Alias alias : aliases) {
|
||||
alias.writeTo(out);
|
||||
}
|
||||
out.writeVInt(aliases.size());
|
||||
for (Alias alias : aliases) {
|
||||
alias.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -76,7 +76,6 @@ public class IndicesExistsRequest extends MasterNodeReadOperationRequest<Indices
|
||||
super.readFrom(in);
|
||||
indices = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -84,6 +83,5 @@ public class IndicesExistsRequest extends MasterNodeReadOperationRequest<Indices
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(indices);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -94,7 +94,6 @@ public class TypesExistsRequest extends MasterNodeReadOperationRequest<TypesExis
|
||||
out.writeStringArray(indices);
|
||||
out.writeStringArray(types);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -103,6 +102,5 @@ public class TypesExistsRequest extends MasterNodeReadOperationRequest<TypesExis
|
||||
indices = in.readStringArray();
|
||||
types = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -116,9 +116,7 @@ public class FlushRequest extends BroadcastOperationRequest<FlushRequest> {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(full);
|
||||
out.writeBoolean(force);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(waitIfOngoing);
|
||||
}
|
||||
out.writeBoolean(waitIfOngoing);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -126,11 +124,7 @@ public class FlushRequest extends BroadcastOperationRequest<FlushRequest> {
|
||||
super.readFrom(in);
|
||||
full = in.readBoolean();
|
||||
force = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
waitIfOngoing = in.readBoolean();
|
||||
} else {
|
||||
waitIfOngoing = false;
|
||||
}
|
||||
waitIfOngoing = in.readBoolean();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -63,11 +63,7 @@ class ShardFlushRequest extends BroadcastShardOperationRequest {
|
||||
super.readFrom(in);
|
||||
full = in.readBoolean();
|
||||
force = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
waitIfOngoing = in.readBoolean();
|
||||
} else {
|
||||
waitIfOngoing = false;
|
||||
}
|
||||
waitIfOngoing = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -75,8 +71,6 @@ class ShardFlushRequest extends BroadcastShardOperationRequest {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(full);
|
||||
out.writeBoolean(force);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(waitIfOngoing);
|
||||
}
|
||||
out.writeBoolean(waitIfOngoing);
|
||||
}
|
||||
}
|
||||
|
@ -155,9 +155,7 @@ public class OptimizeRequest extends BroadcastOperationRequest<OptimizeRequest>
|
||||
maxNumSegments = in.readInt();
|
||||
onlyExpungeDeletes = in.readBoolean();
|
||||
flush = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
upgrade = in.readBoolean();
|
||||
}
|
||||
upgrade = in.readBoolean();
|
||||
}
|
||||
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
@ -166,8 +164,6 @@ public class OptimizeRequest extends BroadcastOperationRequest<OptimizeRequest>
|
||||
out.writeInt(maxNumSegments);
|
||||
out.writeBoolean(onlyExpungeDeletes);
|
||||
out.writeBoolean(flush);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBoolean(upgrade);
|
||||
}
|
||||
out.writeBoolean(upgrade);
|
||||
}
|
||||
}
|
||||
|
@ -78,9 +78,7 @@ class ShardOptimizeRequest extends BroadcastShardOperationRequest {
|
||||
maxNumSegments = in.readInt();
|
||||
onlyExpungeDeletes = in.readBoolean();
|
||||
flush = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
upgrade = in.readBoolean();
|
||||
}
|
||||
upgrade = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -90,8 +88,6 @@ class ShardOptimizeRequest extends BroadcastShardOperationRequest {
|
||||
out.writeInt(maxNumSegments);
|
||||
out.writeBoolean(onlyExpungeDeletes);
|
||||
out.writeBoolean(flush);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBoolean(upgrade);
|
||||
}
|
||||
out.writeBoolean(upgrade);
|
||||
}
|
||||
}
|
||||
|
@ -84,7 +84,6 @@ public class GetSettingsRequest extends MasterNodeReadOperationRequest<GetSettin
|
||||
indices = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
names = in.readStringArray();
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -93,6 +92,5 @@ public class GetSettingsRequest extends MasterNodeReadOperationRequest<GetSettin
|
||||
out.writeStringArray(indices);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
out.writeStringArray(names);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -562,12 +562,8 @@ public class CommonStats implements Streamable, ToXContent {
|
||||
segments = SegmentsStats.readSegmentsStats(in);
|
||||
}
|
||||
translog = in.readOptionalStreamable(new TranslogStats());
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
suggest = in.readOptionalStreamable(new SuggestStats());
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readOptionalStreamable(new QueryCacheStats());
|
||||
}
|
||||
suggest = in.readOptionalStreamable(new SuggestStats());
|
||||
queryCache = in.readOptionalStreamable(new QueryCacheStats());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -663,12 +659,8 @@ public class CommonStats implements Streamable, ToXContent {
|
||||
segments.writeTo(out);
|
||||
}
|
||||
out.writeOptionalStreamable(translog);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeOptionalStreamable(suggest);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalStreamable(queryCache);
|
||||
}
|
||||
out.writeOptionalStreamable(suggest);
|
||||
out.writeOptionalStreamable(queryCache);
|
||||
}
|
||||
|
||||
// note, requires a wrapping object
|
||||
|
@ -77,13 +77,11 @@ public class GetIndexTemplatesRequest extends MasterNodeReadOperationRequest<Get
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
names = in.readStringArray();
|
||||
readLocal(in, Version.V_1_0_0_RC2);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(names);
|
||||
writeLocal(out, Version.V_1_0_0_RC2);
|
||||
}
|
||||
}
|
||||
|
@ -445,11 +445,9 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndex
|
||||
IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in);
|
||||
customs.put(type, customIndexMetaData);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
aliases.add(Alias.read(in));
|
||||
}
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
aliases.add(Alias.read(in));
|
||||
}
|
||||
}
|
||||
|
||||
@ -472,11 +470,9 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest<PutIndex
|
||||
out.writeString(entry.getKey());
|
||||
IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeVInt(aliases.size());
|
||||
for (Alias alias : aliases) {
|
||||
alias.writeTo(out);
|
||||
}
|
||||
out.writeVInt(aliases.size());
|
||||
for (Alias alias : aliases) {
|
||||
alias.writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -69,9 +69,7 @@ public class GetWarmersResponse extends ActionResponse {
|
||||
String[] types = in.readStringArray();
|
||||
BytesReference source = in.readBytesReference();
|
||||
Boolean queryCache = null;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readOptionalBoolean();
|
||||
}
|
||||
queryCache = in.readOptionalBoolean();
|
||||
warmerEntryBuilder.add(new IndexWarmersMetaData.Entry(
|
||||
name,
|
||||
types,
|
||||
@ -95,9 +93,7 @@ public class GetWarmersResponse extends ActionResponse {
|
||||
out.writeString(warmerEntry.name());
|
||||
out.writeStringArray(warmerEntry.types());
|
||||
out.writeBytesReference(warmerEntry.source());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(warmerEntry.queryCache());
|
||||
}
|
||||
out.writeOptionalBoolean(warmerEntry.queryCache());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -102,12 +102,10 @@ public class BulkItemRequest implements Streamable {
|
||||
request = new UpdateRequest();
|
||||
}
|
||||
request.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_3)) {
|
||||
if (in.readBoolean()) {
|
||||
primaryResponse = BulkItemResponse.readBulkItem(in);
|
||||
}
|
||||
ignoreOnReplica = in.readBoolean();
|
||||
if (in.readBoolean()) {
|
||||
primaryResponse = BulkItemResponse.readBulkItem(in);
|
||||
}
|
||||
ignoreOnReplica = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -121,9 +119,7 @@ public class BulkItemRequest implements Streamable {
|
||||
out.writeByte((byte) 2);
|
||||
}
|
||||
request.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_3)) {
|
||||
out.writeOptionalStreamable(primaryResponse);
|
||||
out.writeBoolean(ignoreOnReplica);
|
||||
}
|
||||
out.writeOptionalStreamable(primaryResponse);
|
||||
out.writeBoolean(ignoreOnReplica);
|
||||
}
|
||||
}
|
||||
|
@ -250,10 +250,7 @@ public class CountRequest extends BroadcastOperationRequest<CountRequest> {
|
||||
sourceUnsafe = false;
|
||||
source = in.readBytesReference();
|
||||
types = in.readStringArray();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminateAfter = in.readVInt();
|
||||
}
|
||||
terminateAfter = in.readVInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -264,10 +261,7 @@ public class CountRequest extends BroadcastOperationRequest<CountRequest> {
|
||||
out.writeOptionalString(preference);
|
||||
out.writeBytesReference(source);
|
||||
out.writeStringArray(types);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeVInt(terminateAfter);
|
||||
}
|
||||
out.writeVInt(terminateAfter);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -86,17 +86,13 @@ public class CountResponse extends BroadcastOperationResponse {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
count = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminatedEarly = in.readBoolean();
|
||||
}
|
||||
terminatedEarly = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeVLong(count);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(terminatedEarly);
|
||||
}
|
||||
out.writeBoolean(terminatedEarly);
|
||||
}
|
||||
}
|
||||
|
@ -109,12 +109,7 @@ class ShardCountRequest extends BroadcastShardOperationRequest {
|
||||
}
|
||||
}
|
||||
nowInMillis = in.readVLong();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminateAfter = in.readVInt();
|
||||
} else {
|
||||
terminateAfter = DEFAULT_TERMINATE_AFTER;
|
||||
}
|
||||
terminateAfter = in.readVInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -137,9 +132,6 @@ class ShardCountRequest extends BroadcastShardOperationRequest {
|
||||
out.writeVInt(0);
|
||||
}
|
||||
out.writeVLong(nowInMillis);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeVInt(terminateAfter);
|
||||
}
|
||||
out.writeVInt(terminateAfter);
|
||||
}
|
||||
}
|
||||
|
@ -59,17 +59,13 @@ class ShardCountResponse extends BroadcastShardOperationResponse {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
count = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminatedEarly = in.readBoolean();
|
||||
}
|
||||
terminatedEarly = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeVLong(count);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(terminatedEarly);
|
||||
}
|
||||
out.writeBoolean(terminatedEarly);
|
||||
}
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ public class DeleteRequest extends ShardReplicationOperationRequest<DeleteReques
|
||||
id = in.readString();
|
||||
routing = in.readOptionalString();
|
||||
refresh = in.readBoolean();
|
||||
version = Versions.readVersion(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
}
|
||||
|
||||
@ -238,7 +238,7 @@ public class DeleteRequest extends ShardReplicationOperationRequest<DeleteReques
|
||||
out.writeString(id);
|
||||
out.writeOptionalString(routing());
|
||||
out.writeBoolean(refresh);
|
||||
Versions.writeVersion(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
}
|
||||
|
||||
|
@ -107,10 +107,8 @@ public class ShardDeleteRequest extends ShardReplicationOperationRequest<ShardDe
|
||||
type = in.readString();
|
||||
id = in.readString();
|
||||
refresh = in.readBoolean();
|
||||
version = Versions.readVersion(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
originalIndex = in.readOptionalString();
|
||||
}
|
||||
version = in.readLong();
|
||||
originalIndex = in.readOptionalString();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -120,9 +118,7 @@ public class ShardDeleteRequest extends ShardReplicationOperationRequest<ShardDe
|
||||
out.writeString(type);
|
||||
out.writeString(id);
|
||||
out.writeBoolean(refresh);
|
||||
Versions.writeVersion(version, out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalString(originalIndex);
|
||||
}
|
||||
out.writeLong(version);
|
||||
out.writeOptionalString(originalIndex);
|
||||
}
|
||||
}
|
||||
|
@ -136,11 +136,7 @@ public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest<
|
||||
}
|
||||
}
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
nowInMillis = in.readVLong();
|
||||
} else {
|
||||
nowInMillis = System.currentTimeMillis();
|
||||
}
|
||||
nowInMillis = in.readVLong();
|
||||
originalIndices = OriginalIndices.readOptionalOriginalIndices(in);
|
||||
}
|
||||
|
||||
@ -166,9 +162,7 @@ public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest<
|
||||
} else {
|
||||
out.writeVInt(0);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeVLong(nowInMillis);
|
||||
}
|
||||
out.writeVLong(nowInMillis);
|
||||
OriginalIndices.writeOptionalOriginalIndices(originalIndices, out);
|
||||
}
|
||||
|
||||
|
@ -97,11 +97,9 @@ public class ExplainResponse extends ActionResponse {
|
||||
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
index = in.readString();
|
||||
type = in.readString();
|
||||
id = in.readString();
|
||||
}
|
||||
index = in.readString();
|
||||
type = in.readString();
|
||||
id = in.readString();
|
||||
exists = in.readBoolean();
|
||||
if (in.readBoolean()) {
|
||||
explanation = readExplanation(in);
|
||||
@ -113,11 +111,9 @@ public class ExplainResponse extends ActionResponse {
|
||||
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeString(index);
|
||||
out.writeString(type);
|
||||
out.writeString(id);
|
||||
}
|
||||
out.writeString(index);
|
||||
out.writeString(type);
|
||||
out.writeString(id);
|
||||
out.writeBoolean(exists);
|
||||
if (explanation == null) {
|
||||
out.writeBoolean(false);
|
||||
|
@ -305,12 +305,10 @@ public class GetRequest extends SingleShardOperationRequest<GetRequest> {
|
||||
} else if (realtime == 1) {
|
||||
this.realtime = true;
|
||||
}
|
||||
if(in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
}
|
||||
this.ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
|
||||
this.versionType = VersionType.fromValue(in.readByte());
|
||||
this.version = Versions.readVersionWithVLongForBW(in);
|
||||
this.version = in.readLong();
|
||||
|
||||
fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
|
||||
}
|
||||
@ -339,11 +337,9 @@ public class GetRequest extends SingleShardOperationRequest<GetRequest> {
|
||||
} else {
|
||||
out.writeByte((byte) 1);
|
||||
}
|
||||
if(out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
}
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
out.writeByte(versionType.getValue());
|
||||
Versions.writeVersionWithVLongForBW(version, out);
|
||||
out.writeLong(version);
|
||||
|
||||
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
|
||||
}
|
||||
|
@ -185,7 +185,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
||||
fields[i] = in.readString();
|
||||
}
|
||||
}
|
||||
version = Versions.readVersionWithVLongForBW(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
|
||||
fetchSourceContext = FetchSourceContext.optionalReadFromStream(in);
|
||||
@ -206,7 +206,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
||||
}
|
||||
}
|
||||
|
||||
Versions.writeVersionWithVLongForBW(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
|
||||
FetchSourceContext.optionalWriteToStream(fetchSourceContext, out);
|
||||
@ -513,9 +513,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
||||
} else if (realtime == 1) {
|
||||
this.realtime = true;
|
||||
}
|
||||
if(in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
}
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
|
||||
int size = in.readVInt();
|
||||
items = new ArrayList<>(size);
|
||||
@ -536,9 +534,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
||||
} else {
|
||||
out.writeByte((byte) 1);
|
||||
}
|
||||
if(out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
}
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
|
||||
out.writeVInt(items.size());
|
||||
for (Item item : items) {
|
||||
|
@ -121,50 +121,9 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
|
||||
locations = new IntArrayList(size);
|
||||
items = new ArrayList<>(size);
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
for (int i = 0; i < size; i++) {
|
||||
locations.add(in.readVInt());
|
||||
items.add(MultiGetRequest.Item.readItem(in));
|
||||
}
|
||||
} else {
|
||||
List<String> types = new ArrayList<>(size);
|
||||
List<String> ids = new ArrayList<>(size);
|
||||
List<String[]> fields = new ArrayList<>(size);
|
||||
LongArrayList versions = new LongArrayList(size);
|
||||
List<VersionType> versionTypes = new ArrayList<>(size);
|
||||
List<FetchSourceContext> fetchSourceContexts = new ArrayList<>(size);
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
locations.add(in.readVInt());
|
||||
if (in.readBoolean()) {
|
||||
types.add(in.readSharedString());
|
||||
} else {
|
||||
types.add(null);
|
||||
}
|
||||
ids.add(in.readString());
|
||||
int size1 = in.readVInt();
|
||||
if (size1 > 0) {
|
||||
String[] fieldsArray = new String[size1];
|
||||
for (int j = 0; j < size1; j++) {
|
||||
fieldsArray[j] = in.readString();
|
||||
}
|
||||
fields.add(fieldsArray);
|
||||
} else {
|
||||
fields.add(null);
|
||||
}
|
||||
versions.add(Versions.readVersionWithVLongForBW(in));
|
||||
versionTypes.add(VersionType.fromValue(in.readByte()));
|
||||
|
||||
fetchSourceContexts.add(FetchSourceContext.optionalReadFromStream(in));
|
||||
}
|
||||
|
||||
for (int i = 0; i < size; i++) {
|
||||
//before 1.4 we have only one index, the concrete one
|
||||
MultiGetRequest.Item item = new MultiGetRequest.Item(index, types.get(i), ids.get(i))
|
||||
.fields(fields.get(i)).version(versions.get(i)).versionType(versionTypes.get(i))
|
||||
.fetchSourceContext(fetchSourceContexts.get(i));
|
||||
items.add(item);
|
||||
}
|
||||
for (int i = 0; i < size; i++) {
|
||||
locations.add(in.readVInt());
|
||||
items.add(MultiGetRequest.Item.readItem(in));
|
||||
}
|
||||
|
||||
preference = in.readOptionalString();
|
||||
@ -175,9 +134,7 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
|
||||
} else if (realtime == 1) {
|
||||
this.realtime = true;
|
||||
}
|
||||
if(in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
}
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -185,34 +142,9 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
|
||||
super.writeTo(out);
|
||||
out.writeVInt(locations.size());
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
for (int i = 0; i < locations.size(); i++) {
|
||||
out.writeVInt(locations.get(i));
|
||||
items.get(i).writeTo(out);
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < locations.size(); i++) {
|
||||
out.writeVInt(locations.get(i));
|
||||
MultiGetRequest.Item item = items.get(i);
|
||||
if (item.type() == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
out.writeSharedString(item.type());
|
||||
}
|
||||
out.writeString(item.id());
|
||||
if (item.fields() == null) {
|
||||
out.writeVInt(0);
|
||||
} else {
|
||||
out.writeVInt(item.fields().length);
|
||||
for (String field : item.fields()) {
|
||||
out.writeString(field);
|
||||
}
|
||||
}
|
||||
Versions.writeVersionWithVLongForBW(item.version(), out);
|
||||
out.writeByte(item.versionType().getValue());
|
||||
FetchSourceContext.optionalWriteToStream(item.fetchSourceContext(), out);
|
||||
}
|
||||
for (int i = 0; i < locations.size(); i++) {
|
||||
out.writeVInt(locations.get(i));
|
||||
items.get(i).writeTo(out);
|
||||
}
|
||||
|
||||
out.writeOptionalString(preference);
|
||||
@ -224,9 +156,7 @@ public class MultiGetShardRequest extends SingleShardOperationRequest<MultiGetSh
|
||||
} else {
|
||||
out.writeByte((byte) 1);
|
||||
}
|
||||
if(out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
}
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
|
||||
}
|
||||
|
||||
|
@ -686,11 +686,9 @@ public class IndexRequest extends ShardReplicationOperationRequest<IndexRequest>
|
||||
|
||||
opType = OpType.fromId(in.readByte());
|
||||
refresh = in.readBoolean();
|
||||
version = Versions.readVersion(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
autoGeneratedId = in.readBoolean();
|
||||
}
|
||||
autoGeneratedId = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -705,11 +703,9 @@ public class IndexRequest extends ShardReplicationOperationRequest<IndexRequest>
|
||||
out.writeBytesReference(source);
|
||||
out.writeByte(opType.id());
|
||||
out.writeBoolean(refresh);
|
||||
Versions.writeVersion(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeBoolean(autoGeneratedId);
|
||||
}
|
||||
out.writeBoolean(autoGeneratedId);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -146,7 +146,7 @@ public class DeleteIndexedScriptRequest extends ActionRequest<DeleteIndexedScrip
|
||||
super.readFrom(in);
|
||||
scriptLang = in.readString();
|
||||
id = in.readString();
|
||||
version = Versions.readVersion(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
}
|
||||
|
||||
@ -155,7 +155,7 @@ public class DeleteIndexedScriptRequest extends ActionRequest<DeleteIndexedScrip
|
||||
super.writeTo(out);
|
||||
out.writeString(scriptLang);
|
||||
out.writeString(id);
|
||||
Versions.writeVersion(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
}
|
||||
|
||||
|
@ -159,7 +159,7 @@ public class GetIndexedScriptRequest extends ActionRequest<GetIndexedScriptReque
|
||||
in.readByte(); //Realtime
|
||||
}
|
||||
this.versionType = VersionType.fromValue(in.readByte());
|
||||
this.version = Versions.readVersionWithVLongForBW(in);
|
||||
this.version = in.readLong();
|
||||
|
||||
if (in.getVersion().before(Version.V_1_5_0)) {
|
||||
FetchSourceContext.optionalReadFromStream(in);
|
||||
@ -182,7 +182,7 @@ public class GetIndexedScriptRequest extends ActionRequest<GetIndexedScriptReque
|
||||
}
|
||||
|
||||
out.writeByte(versionType.getValue());
|
||||
Versions.writeVersionWithVLongForBW(version, out);
|
||||
out.writeLong(version);
|
||||
|
||||
if (out.getVersion().before(Version.V_1_5_0)) {
|
||||
FetchSourceContext.optionalWriteToStream(null, out);
|
||||
|
@ -347,7 +347,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
|
||||
sourceUnsafe = false;
|
||||
|
||||
opType = IndexRequest.OpType.fromId(in.readByte());
|
||||
version = Versions.readVersion(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
}
|
||||
|
||||
@ -358,7 +358,7 @@ public class PutIndexedScriptRequest extends ActionRequest<PutIndexedScriptReque
|
||||
out.writeOptionalString(id);
|
||||
out.writeBytesReference(source);
|
||||
out.writeByte(opType.id());
|
||||
Versions.writeVersion(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
}
|
||||
|
||||
|
@ -611,11 +611,7 @@ public class MoreLikeThisRequest extends ActionRequest<MoreLikeThisRequest> impl
|
||||
}
|
||||
}
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
minimumShouldMatch(in.readString());
|
||||
} else {
|
||||
percentTermsToMatch(in.readFloat());
|
||||
}
|
||||
minimumShouldMatch(in.readString());
|
||||
|
||||
minTermFreq = in.readVInt();
|
||||
maxQueryTerms = in.readVInt();
|
||||
@ -631,19 +627,9 @@ public class MoreLikeThisRequest extends ActionRequest<MoreLikeThisRequest> impl
|
||||
minWordLength = in.readVInt();
|
||||
maxWordLength = in.readVInt();
|
||||
boostTerms = in.readFloat();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
include = in.readBoolean();
|
||||
} else {
|
||||
include = false; // hard-coded behavior until Elasticsearch 1.2
|
||||
}
|
||||
include = in.readBoolean();
|
||||
|
||||
searchType = SearchType.fromId(in.readByte());
|
||||
if (in.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//searchQueryHint was unused and removed in 1.4
|
||||
if (in.readBoolean()) {
|
||||
in.readString();
|
||||
}
|
||||
}
|
||||
size = in.readVInt();
|
||||
if (size == 0) {
|
||||
searchIndices = null;
|
||||
@ -693,11 +679,7 @@ public class MoreLikeThisRequest extends ActionRequest<MoreLikeThisRequest> impl
|
||||
}
|
||||
}
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
out.writeString(minimumShouldMatch);
|
||||
} else {
|
||||
out.writeFloat(percentTermsToMatch());
|
||||
}
|
||||
out.writeString(minimumShouldMatch);
|
||||
|
||||
out.writeVInt(minTermFreq);
|
||||
out.writeVInt(maxQueryTerms);
|
||||
@ -714,15 +696,9 @@ public class MoreLikeThisRequest extends ActionRequest<MoreLikeThisRequest> impl
|
||||
out.writeVInt(minWordLength);
|
||||
out.writeVInt(maxWordLength);
|
||||
out.writeFloat(boostTerms);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeBoolean(include);
|
||||
}
|
||||
out.writeBoolean(include);
|
||||
|
||||
out.writeByte(searchType.id());
|
||||
if (out.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//searchQueryHint was unused and removed in 1.4
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
if (searchIndices == null) {
|
||||
out.writeVInt(0);
|
||||
} else {
|
||||
|
@ -110,9 +110,7 @@ public class PercolateShardRequest extends BroadcastShardOperationRequest {
|
||||
source = in.readBytesReference();
|
||||
docSource = in.readBytesReference();
|
||||
onlyCount = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
numberOfShards = in.readVInt();
|
||||
}
|
||||
numberOfShards = in.readVInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -122,9 +120,7 @@ public class PercolateShardRequest extends BroadcastShardOperationRequest {
|
||||
out.writeBytesReference(source);
|
||||
out.writeBytesReference(docSource);
|
||||
out.writeBoolean(onlyCount);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeVInt(numberOfShards);
|
||||
}
|
||||
out.writeVInt(numberOfShards);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -76,21 +76,13 @@ public class ClearScrollResponse extends ActionResponse implements StatusToXCont
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
succeeded = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
numFreed = in.readVInt();
|
||||
} else {
|
||||
// On older nodes we can't tell how many search contexts where freed, so we assume at least one,
|
||||
// so that the rest api doesn't return 404 where SC were indeed freed.
|
||||
numFreed = 1;
|
||||
}
|
||||
numFreed = in.readVInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(succeeded);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeVInt(numFreed);
|
||||
}
|
||||
out.writeVInt(numFreed);
|
||||
}
|
||||
}
|
||||
|
@ -571,29 +571,19 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
|
||||
types = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
templateSourceUnsafe = false;
|
||||
templateSource = in.readBytesReference();
|
||||
templateName = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
templateType = ScriptService.ScriptType.readFrom(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
templateParams = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
}
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readOptionalBoolean();
|
||||
templateSourceUnsafe = false;
|
||||
templateSource = in.readBytesReference();
|
||||
templateName = in.readOptionalString();
|
||||
templateType = ScriptService.ScriptType.readFrom(in);
|
||||
if (in.readBoolean()) {
|
||||
templateParams = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
queryCache = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_1_2_0)) {
|
||||
out.writeByte((byte) 2); // operation threading
|
||||
}
|
||||
out.writeByte(searchType.id());
|
||||
|
||||
out.writeVInt(indices.length);
|
||||
@ -615,21 +605,15 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
|
||||
out.writeStringArray(types);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBytesReference(templateSource);
|
||||
out.writeOptionalString(templateName);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
ScriptService.ScriptType.writeTo(templateType, out);
|
||||
}
|
||||
boolean existTemplateParams = templateParams != null;
|
||||
out.writeBoolean(existTemplateParams);
|
||||
if (existTemplateParams) {
|
||||
out.writeGenericValue(templateParams);
|
||||
}
|
||||
out.writeBytesReference(templateSource);
|
||||
out.writeOptionalString(templateName);
|
||||
ScriptService.ScriptType.writeTo(templateType, out);
|
||||
boolean existTemplateParams = templateParams != null;
|
||||
out.writeBoolean(existTemplateParams);
|
||||
if (existTemplateParams) {
|
||||
out.writeGenericValue(templateParams);
|
||||
}
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(queryCache);
|
||||
}
|
||||
out.writeOptionalBoolean(queryCache);
|
||||
}
|
||||
}
|
||||
|
@ -35,8 +35,6 @@ public class ParsedScrollId {
|
||||
|
||||
public static final String SCAN = "scan";
|
||||
|
||||
public static final Version SCROLL_SEARCH_AFTER_MINIMUM_VERSION = Version.V_1_2_0;
|
||||
|
||||
private final String source;
|
||||
|
||||
private final String type;
|
||||
|
@ -124,9 +124,6 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
|
||||
Tuple<String, Long> target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.v1());
|
||||
if (node != null) {
|
||||
if (node.getVersion().before(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
useSlowScroll = true;
|
||||
}
|
||||
executePhase(i, node, target.v2());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
|
@ -131,9 +131,6 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
|
||||
Tuple<String, Long> target = context[i];
|
||||
DiscoveryNode node = nodes.get(target.v1());
|
||||
if (node != null) {
|
||||
if (node.getVersion().before(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
useSlowScroll = true;
|
||||
}
|
||||
executeQueryPhase(i, counter, node, target.v2());
|
||||
} else {
|
||||
if (logger.isDebugEnabled()) {
|
||||
|
@ -124,18 +124,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
|
||||
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
|
||||
|
||||
firstResults = new AtomicArray<>(shardsIts.size());
|
||||
// Not so nice, but we need to know if there're nodes below the supported version
|
||||
// and if so fall back to classic scroll (based on from). We need to check every node
|
||||
// because we don't to what nodes we end up sending the request (shard may fail or relocate)
|
||||
boolean useSlowScroll = false;
|
||||
if (request.scroll() != null) {
|
||||
for (DiscoveryNode discoveryNode : clusterState.nodes()) {
|
||||
if (discoveryNode.getVersion().before(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
useSlowScroll = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
this.useSlowScroll = useSlowScroll;
|
||||
this.useSlowScroll = false;
|
||||
}
|
||||
|
||||
public void start() {
|
||||
|
@ -108,17 +108,7 @@ public class IndicesOptions {
|
||||
}
|
||||
|
||||
public void writeIndicesOptions(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_2)) {
|
||||
out.write(id);
|
||||
} else if (out.getVersion().before(Version.V_1_2_0)) {
|
||||
// Target node doesn't know about the FORBID_CLOSED_INDICES and FORBID_ALIASES_TO_MULTIPLE_INDICES flags,
|
||||
// so unset the bits starting from the 5th position.
|
||||
out.write(id & 0xf);
|
||||
} else {
|
||||
// Target node doesn't know about the FORBID_CLOSED_INDICES flag,
|
||||
// so unset the bits starting from the 6th position.
|
||||
out.write(id & 0x1f);
|
||||
}
|
||||
out.write(id);
|
||||
}
|
||||
|
||||
public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException {
|
||||
|
@ -42,35 +42,15 @@ public abstract class MasterNodeReadOperationRequest<T extends MasterNodeReadOpe
|
||||
return local;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads the local flag
|
||||
*/
|
||||
protected void readLocal(StreamInput in) throws IOException {
|
||||
readLocal(in, null);
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeBoolean(local);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads the local flag if on or after the specified min version or if the version is <code>null</code>.
|
||||
*/
|
||||
protected void readLocal(StreamInput in, Version minVersion) throws IOException {
|
||||
if (minVersion == null || in.getVersion().onOrAfter(minVersion)) {
|
||||
local = in.readBoolean();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* writes the local flag
|
||||
*/
|
||||
protected void writeLocal(StreamOutput out) throws IOException {
|
||||
writeLocal(out, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* writes the local flag if on or after the specified min version or if the version is <code>null</code>.
|
||||
*/
|
||||
protected void writeLocal(StreamOutput out, Version minVersion) throws IOException {
|
||||
if (minVersion == null || out.getVersion().onOrAfter(minVersion)) {
|
||||
out.writeBoolean(local);
|
||||
}
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
local = in.readBoolean();
|
||||
}
|
||||
}
|
||||
|
@ -76,7 +76,6 @@ public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends M
|
||||
indices = in.readStringArray();
|
||||
types = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
readLocal(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -85,6 +84,5 @@ public abstract class ClusterInfoRequest<T extends ClusterInfoRequest> extends M
|
||||
out.writeStringArray(indices);
|
||||
out.writeStringArray(types);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
writeLocal(out);
|
||||
}
|
||||
}
|
||||
|
@ -201,9 +201,7 @@ public abstract class ShardReplicationOperationRequest<T extends ShardReplicatio
|
||||
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
|
||||
timeout = TimeValue.readTimeValue(in);
|
||||
index = in.readSharedString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
canHaveDuplicates = in.readBoolean();
|
||||
}
|
||||
canHaveDuplicates = in.readBoolean();
|
||||
// no need to serialize threaded* parameters, since they only matter locally
|
||||
}
|
||||
|
||||
@ -214,9 +212,7 @@ public abstract class ShardReplicationOperationRequest<T extends ShardReplicatio
|
||||
out.writeByte(consistencyLevel.id());
|
||||
timeout.writeTo(out);
|
||||
out.writeSharedString(index);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeBoolean(canHaveDuplicates);
|
||||
}
|
||||
out.writeBoolean(canHaveDuplicates);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -290,11 +290,7 @@ public abstract class TransportShardReplicationOperationAction<Request extends S
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
int shard = -1;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId = ShardId.readShardId(in);
|
||||
} else {
|
||||
shard = in.readVInt();
|
||||
}
|
||||
shardId = ShardId.readShardId(in);
|
||||
request = newReplicaRequestInstance();
|
||||
request.readFrom(in);
|
||||
if (in.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
@ -307,13 +303,7 @@ public abstract class TransportShardReplicationOperationAction<Request extends S
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId.writeTo(out);
|
||||
} else {
|
||||
out.writeVInt(shardId.id());
|
||||
//older nodes expect the concrete index as part of the request
|
||||
request.index(shardId.getIndex());
|
||||
}
|
||||
shardId.writeTo(out);
|
||||
request.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
@ -346,27 +346,14 @@ public abstract class TransportSingleCustomOperationAction<Request extends Singl
|
||||
super.readFrom(in);
|
||||
request = newRequest();
|
||||
request.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId = ShardId.readShardId(in);
|
||||
} else {
|
||||
//older nodes will send the concrete index as part of the request
|
||||
shardId = new ShardId(request.index(), in.readVInt());
|
||||
}
|
||||
shardId = ShardId.readShardId(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//older nodes expect the concrete index as part of the request
|
||||
request.index(shardId.getIndex());
|
||||
}
|
||||
request.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId.writeTo(out);
|
||||
} else {
|
||||
out.writeVInt(shardId.id());
|
||||
}
|
||||
shardId.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -338,27 +338,14 @@ public abstract class TransportShardSingleOperationAction<Request extends Single
|
||||
super.readFrom(in);
|
||||
request = newRequest();
|
||||
request.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId = ShardId.readShardId(in);
|
||||
} else {
|
||||
//older nodes will send the concrete index as part of the request
|
||||
shardId = new ShardId(request.index(), in.readVInt());
|
||||
}
|
||||
shardId = ShardId.readShardId(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//older nodes expect the concrete index as part of the request
|
||||
request.index(shardId.getIndex());
|
||||
}
|
||||
request.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
shardId.writeTo(out);
|
||||
} else {
|
||||
out.writeVInt(shardId.id());
|
||||
}
|
||||
shardId.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -386,17 +386,11 @@ public class TermVectorsRequest extends SingleShardOperationRequest<TermVectorsR
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//term vector used to read & write the index twice, here and in the parent class
|
||||
in.readString();
|
||||
}
|
||||
type = in.readString();
|
||||
id = in.readString();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
if (in.readBoolean()) {
|
||||
doc = in.readBytesReference();
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
doc = in.readBytesReference();
|
||||
}
|
||||
routing = in.readOptionalString();
|
||||
preference = in.readOptionalString();
|
||||
@ -415,29 +409,21 @@ public class TermVectorsRequest extends SingleShardOperationRequest<TermVectorsR
|
||||
selectedFields.add(in.readString());
|
||||
}
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
if (in.readBoolean()) {
|
||||
perFieldAnalyzer = readPerFieldAnalyzer(in.readMap());
|
||||
}
|
||||
this.realtime = in.readBoolean();
|
||||
if (in.readBoolean()) {
|
||||
perFieldAnalyzer = readPerFieldAnalyzer(in.readMap());
|
||||
}
|
||||
this.realtime = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_1_4_0_Beta1)) {
|
||||
//term vector used to read & write the index twice, here and in the parent class
|
||||
out.writeString(index);
|
||||
}
|
||||
out.writeString(type);
|
||||
out.writeString(id);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(doc != null);
|
||||
if (doc != null) {
|
||||
out.writeBytesReference(doc);
|
||||
}
|
||||
out.writeBoolean(doc != null);
|
||||
if (doc != null) {
|
||||
out.writeBytesReference(doc);
|
||||
}
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(preference);
|
||||
@ -454,13 +440,11 @@ public class TermVectorsRequest extends SingleShardOperationRequest<TermVectorsR
|
||||
} else {
|
||||
out.writeVInt(0);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
out.writeBoolean(perFieldAnalyzer != null);
|
||||
if (perFieldAnalyzer != null) {
|
||||
out.writeGenericValue(perFieldAnalyzer);
|
||||
}
|
||||
out.writeBoolean(realtime());
|
||||
out.writeBoolean(perFieldAnalyzer != null);
|
||||
if (perFieldAnalyzer != null) {
|
||||
out.writeGenericValue(perFieldAnalyzer);
|
||||
}
|
||||
out.writeBoolean(realtime());
|
||||
}
|
||||
|
||||
public static enum Flag {
|
||||
|
@ -648,11 +648,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
||||
routing = in.readOptionalString();
|
||||
script = in.readOptionalString();
|
||||
if(Strings.hasLength(script)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
scriptType = ScriptService.ScriptType.readFrom(in);
|
||||
} else {
|
||||
scriptType = null;
|
||||
}
|
||||
scriptType = ScriptService.ScriptType.readFrom(in);
|
||||
}
|
||||
scriptLang = in.readOptionalString();
|
||||
scriptParams = in.readMap();
|
||||
@ -674,12 +670,10 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
||||
upsertRequest.readFrom(in);
|
||||
}
|
||||
docAsUpsert = in.readBoolean();
|
||||
version = Versions.readVersion(in);
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
detectNoop = in.readBoolean();
|
||||
scriptedUpsert = in.readBoolean();
|
||||
}
|
||||
detectNoop = in.readBoolean();
|
||||
scriptedUpsert = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -691,7 +685,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
||||
out.writeString(id);
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(script);
|
||||
if (Strings.hasLength(script) && out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
if (Strings.hasLength(script)) {
|
||||
ScriptService.ScriptType.writeTo(scriptType, out);
|
||||
}
|
||||
out.writeOptionalString(scriptLang);
|
||||
@ -727,12 +721,10 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
|
||||
upsertRequest.writeTo(out);
|
||||
}
|
||||
out.writeBoolean(docAsUpsert);
|
||||
Versions.writeVersion(version, out);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(detectNoop);
|
||||
out.writeBoolean(scriptedUpsert);
|
||||
}
|
||||
out.writeBoolean(detectNoop);
|
||||
out.writeBoolean(scriptedUpsert);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -625,21 +625,15 @@ public class ClusterState implements ToXContent {
|
||||
}
|
||||
|
||||
public static void writeTo(ClusterState state, StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_1)) {
|
||||
out.writeBoolean(state.clusterName != null);
|
||||
if (state.clusterName != null) {
|
||||
state.clusterName.writeTo(out);
|
||||
}
|
||||
out.writeBoolean(state.clusterName != null);
|
||||
if (state.clusterName != null) {
|
||||
state.clusterName.writeTo(out);
|
||||
}
|
||||
out.writeLong(state.version());
|
||||
MetaData.Builder.writeTo(state.metaData(), out);
|
||||
RoutingTable.Builder.writeTo(state.routingTable(), out);
|
||||
DiscoveryNodes.Builder.writeTo(state.nodes(), out);
|
||||
ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out);
|
||||
if (out.getVersion().before(Version.V_1_1_0)) {
|
||||
// Versions before 1.1.0 are expecting AllocationExplanation
|
||||
AllocationExplanation.EMPTY.writeTo(out);
|
||||
}
|
||||
out.writeVInt(state.customs().size());
|
||||
for (ObjectObjectCursor<String, Custom> cursor : state.customs()) {
|
||||
out.writeString(cursor.key);
|
||||
@ -655,11 +649,8 @@ public class ClusterState implements ToXContent {
|
||||
*/
|
||||
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode, @Nullable ClusterName defaultClusterName) throws IOException {
|
||||
ClusterName clusterName = defaultClusterName;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_1)) {
|
||||
// it might be null even if it comes from a >= 1.1.1 node since it's origin might be an older node
|
||||
if (in.readBoolean()) {
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
}
|
||||
Builder builder = new Builder(clusterName);
|
||||
builder.version = in.readLong();
|
||||
@ -667,10 +658,6 @@ public class ClusterState implements ToXContent {
|
||||
builder.routingTable = RoutingTable.Builder.readFrom(in);
|
||||
builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode);
|
||||
builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in);
|
||||
if (in.getVersion().before(Version.V_1_1_0)) {
|
||||
// Ignore the explanation read, since after 1.1.0 it's not part of the cluster state
|
||||
AllocationExplanation.readAllocationExplanation(in);
|
||||
}
|
||||
int customSize = in.readVInt();
|
||||
for (int i = 0; i < customSize; i++) {
|
||||
String type = in.readString();
|
||||
|
@ -415,12 +415,10 @@ public class IndexTemplateMetaData {
|
||||
for (int i = 0; i < mappingsSize; i++) {
|
||||
builder.putMapping(in.readString(), CompressedString.readCompressedString(in));
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
|
||||
builder.putAlias(aliasMd);
|
||||
}
|
||||
int aliasesSize = in.readVInt();
|
||||
for (int i = 0; i < aliasesSize; i++) {
|
||||
AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in);
|
||||
builder.putAlias(aliasMd);
|
||||
}
|
||||
int customSize = in.readVInt();
|
||||
for (int i = 0; i < customSize; i++) {
|
||||
@ -441,11 +439,9 @@ public class IndexTemplateMetaData {
|
||||
out.writeString(cursor.key);
|
||||
cursor.value.writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeVInt(indexTemplateMetaData.aliases().size());
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexTemplateMetaData.aliases().values()) {
|
||||
AliasMetaData.Builder.writeTo(cursor.value, out);
|
||||
}
|
||||
out.writeVInt(indexTemplateMetaData.aliases().size());
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexTemplateMetaData.aliases().values()) {
|
||||
AliasMetaData.Builder.writeTo(cursor.value, out);
|
||||
}
|
||||
out.writeVInt(indexTemplateMetaData.customs().size());
|
||||
for (ObjectObjectCursor<String, IndexMetaData.Custom> cursor : indexTemplateMetaData.customs()) {
|
||||
|
@ -539,21 +539,9 @@ public class MappingMetaData {
|
||||
}
|
||||
// timestamp
|
||||
out.writeBoolean(mappingMd.timestamp().enabled());
|
||||
if (mappingMd.timestamp().hasPath()) {
|
||||
out.writeBoolean(true);
|
||||
out.writeString(mappingMd.timestamp().path());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeOptionalString(mappingMd.timestamp().path());
|
||||
out.writeString(mappingMd.timestamp().format());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
if (mappingMd.timestamp().hasDefaultTimestamp()) {
|
||||
out.writeBoolean(true);
|
||||
out.writeString(mappingMd.timestamp().defaultTimestamp());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
}
|
||||
out.writeOptionalString(mappingMd.timestamp().defaultTimestamp());
|
||||
out.writeBoolean(mappingMd.hasParentField());
|
||||
}
|
||||
|
||||
@ -591,8 +579,7 @@ public class MappingMetaData {
|
||||
// routing
|
||||
Routing routing = new Routing(in.readBoolean(), in.readBoolean() ? in.readString() : null);
|
||||
// timestamp
|
||||
Timestamp timestamp = new Timestamp(in.readBoolean(), in.readBoolean() ? in.readString() : null, in.readString(),
|
||||
in.getVersion().onOrAfter(Version.V_1_4_0_Beta1) ? (in.readBoolean() ? in.readString() : null) : TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP);
|
||||
final Timestamp timestamp = new Timestamp(in.readBoolean(), in.readOptionalString(), in.readString(), in.readOptionalString());
|
||||
final boolean hasParentField = in.readBoolean();
|
||||
return new MappingMetaData(type, source, id, routing, timestamp, hasParentField);
|
||||
}
|
||||
|
@ -79,14 +79,8 @@ public class PendingClusterTask implements Streamable {
|
||||
insertOrder = in.readVLong();
|
||||
priority = Priority.readFrom(in);
|
||||
source = in.readText();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
timeInQueue = in.readLong();
|
||||
} else {
|
||||
timeInQueue = in.readVLong();
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
executing = in.readBoolean();
|
||||
}
|
||||
timeInQueue = in.readLong();
|
||||
executing = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -94,14 +88,7 @@ public class PendingClusterTask implements Streamable {
|
||||
out.writeVLong(insertOrder);
|
||||
Priority.writeTo(priority, out);
|
||||
out.writeText(source);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
// timeInQueue is set to -1 when unknown and can be negative if time goes backwards
|
||||
out.writeLong(timeInQueue);
|
||||
} else {
|
||||
out.writeVLong(timeInQueue);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
out.writeBoolean(executing);
|
||||
}
|
||||
out.writeLong(timeInQueue);
|
||||
out.writeBoolean(executing);
|
||||
}
|
||||
}
|
@ -434,7 +434,7 @@ public class Lucene {
|
||||
|
||||
public static Explanation readExplanation(StreamInput in) throws IOException {
|
||||
Explanation explanation;
|
||||
if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_4_0_Beta1) && in.readBoolean()) {
|
||||
if (in.readBoolean()) {
|
||||
Boolean match = in.readOptionalBoolean();
|
||||
explanation = new ComplexExplanation();
|
||||
((ComplexExplanation) explanation).setMatch(match);
|
||||
@ -455,13 +455,11 @@ public class Lucene {
|
||||
|
||||
public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException {
|
||||
|
||||
if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_1_4_0_Beta1)) {
|
||||
if (explanation instanceof ComplexExplanation) {
|
||||
out.writeBoolean(true);
|
||||
out.writeOptionalBoolean(((ComplexExplanation) explanation).getMatch());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
if (explanation instanceof ComplexExplanation) {
|
||||
out.writeBoolean(true);
|
||||
out.writeOptionalBoolean(((ComplexExplanation) explanation).getMatch());
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeFloat(explanation.getValue());
|
||||
out.writeString(explanation.getDescription());
|
||||
|
@ -82,47 +82,6 @@ public class Versions {
|
||||
return lookupState;
|
||||
}
|
||||
|
||||
public static void writeVersion(long version, StreamOutput out) throws IOException {
|
||||
if (out.getVersion().before(Version.V_1_2_0) && version == MATCH_ANY) {
|
||||
// we have to send out a value the node will understand
|
||||
version = MATCH_ANY_PRE_1_2_0;
|
||||
}
|
||||
out.writeLong(version);
|
||||
}
|
||||
|
||||
public static long readVersion(StreamInput in) throws IOException {
|
||||
long version = in.readLong();
|
||||
if (in.getVersion().before(Version.V_1_2_0) && version == MATCH_ANY_PRE_1_2_0) {
|
||||
version = MATCH_ANY;
|
||||
}
|
||||
return version;
|
||||
}
|
||||
|
||||
public static void writeVersionWithVLongForBW(long version, StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeLong(version);
|
||||
return;
|
||||
}
|
||||
|
||||
if (version == MATCH_ANY) {
|
||||
// we have to send out a value the node will understand
|
||||
version = MATCH_ANY_PRE_1_2_0;
|
||||
}
|
||||
out.writeVLong(version);
|
||||
}
|
||||
|
||||
public static long readVersionWithVLongForBW(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
return in.readLong();
|
||||
} else {
|
||||
long version = in.readVLong();
|
||||
if (version == MATCH_ANY_PRE_1_2_0) {
|
||||
return MATCH_ANY;
|
||||
}
|
||||
return version;
|
||||
}
|
||||
}
|
||||
|
||||
private Versions() {
|
||||
}
|
||||
|
||||
|
@ -423,9 +423,7 @@ public class MasterFaultDetection extends FaultDetection {
|
||||
super.readFrom(in);
|
||||
nodeId = in.readString();
|
||||
masterNodeId = in.readString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
}
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -433,9 +431,7 @@ public class MasterFaultDetection extends FaultDetection {
|
||||
super.writeTo(out);
|
||||
out.writeString(nodeId);
|
||||
out.writeString(masterNodeId);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
clusterName.writeTo(out);
|
||||
}
|
||||
clusterName.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -313,22 +313,18 @@ public class NodesFaultDetection extends FaultDetection {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
nodeId = in.readString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
masterNode = DiscoveryNode.readNode(in);
|
||||
clusterStateVersion = in.readLong();
|
||||
}
|
||||
clusterName = ClusterName.readClusterName(in);
|
||||
masterNode = DiscoveryNode.readNode(in);
|
||||
clusterStateVersion = in.readLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(nodeId);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
clusterName.writeTo(out);
|
||||
masterNode.writeTo(out);
|
||||
out.writeLong(clusterStateVersion);
|
||||
}
|
||||
clusterName.writeTo(out);
|
||||
masterNode.writeTo(out);
|
||||
out.writeLong(clusterStateVersion);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -178,31 +178,19 @@ public class SegmentsStats implements Streamable, ToXContent {
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
count = in.readVLong();
|
||||
memoryInBytes = in.readLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
indexWriterMemoryInBytes = in.readLong();
|
||||
versionMapMemoryInBytes = in.readLong();
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
indexWriterMaxMemoryInBytes = in.readLong();
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
bitsetMemoryInBytes = in.readLong();
|
||||
}
|
||||
indexWriterMemoryInBytes = in.readLong();
|
||||
versionMapMemoryInBytes = in.readLong();
|
||||
indexWriterMaxMemoryInBytes = in.readLong();
|
||||
bitsetMemoryInBytes = in.readLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(count);
|
||||
out.writeLong(memoryInBytes);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
out.writeLong(indexWriterMemoryInBytes);
|
||||
out.writeLong(versionMapMemoryInBytes);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeLong(indexWriterMaxMemoryInBytes);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeLong(bitsetMemoryInBytes);
|
||||
}
|
||||
out.writeLong(indexWriterMemoryInBytes);
|
||||
out.writeLong(versionMapMemoryInBytes);
|
||||
out.writeLong(indexWriterMaxMemoryInBytes);
|
||||
out.writeLong(bitsetMemoryInBytes);
|
||||
}
|
||||
}
|
||||
|
@ -159,15 +159,9 @@ public class IndexingStats implements Streamable, ToXContent {
|
||||
deleteCount = in.readVLong();
|
||||
deleteTimeInMillis = in.readVLong();
|
||||
deleteCurrent = in.readVLong();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
noopUpdateCount = in.readVLong();
|
||||
}
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
isThrottled = in.readBoolean();
|
||||
throttleTimeInMillis = in.readLong();
|
||||
}
|
||||
noopUpdateCount = in.readVLong();
|
||||
isThrottled = in.readBoolean();
|
||||
throttleTimeInMillis = in.readLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -179,15 +173,9 @@ public class IndexingStats implements Streamable, ToXContent {
|
||||
out.writeVLong(deleteCount);
|
||||
out.writeVLong(deleteTimeInMillis);
|
||||
out.writeVLong(deleteCurrent);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeVLong(noopUpdateCount);
|
||||
}
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
out.writeBoolean(isThrottled);
|
||||
out.writeLong(throttleTimeInMillis);
|
||||
}
|
||||
out.writeVLong(noopUpdateCount);
|
||||
out.writeBoolean(isThrottled);
|
||||
out.writeLong(throttleTimeInMillis);
|
||||
|
||||
}
|
||||
|
||||
|
@ -121,15 +121,9 @@ public class StoreFileMetaData implements Streamable {
|
||||
name = in.readString();
|
||||
length = in.readVLong();
|
||||
checksum = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_0)) {
|
||||
String versionString = in.readOptionalString();
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, null);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_3)) {
|
||||
hash = in.readBytesRef();
|
||||
} else {
|
||||
hash = new BytesRef();
|
||||
}
|
||||
String versionString = in.readOptionalString();
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, null);
|
||||
hash = in.readBytesRef();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -137,12 +131,8 @@ public class StoreFileMetaData implements Streamable {
|
||||
out.writeString(name);
|
||||
out.writeVLong(length);
|
||||
out.writeOptionalString(checksum);
|
||||
if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_0)) {
|
||||
out.writeOptionalString(writtenBy == null ? null : writtenBy.toString());
|
||||
}
|
||||
if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_3)) {
|
||||
out.writeBytesRef(hash);
|
||||
}
|
||||
out.writeOptionalString(writtenBy == null ? null : writtenBy.toString());
|
||||
out.writeBytesRef(hash);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -86,16 +86,8 @@ public class CircuitBreakerStats implements Streamable, ToXContent {
|
||||
limit = in.readLong();
|
||||
estimated = in.readLong();
|
||||
overhead = in.readDouble();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
this.trippedCount = in.readLong();
|
||||
} else {
|
||||
this.trippedCount = -1;
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.name = CircuitBreaker.Name.readFrom(in);
|
||||
} else {
|
||||
this.name = CircuitBreaker.Name.FIELDDATA;
|
||||
}
|
||||
this.trippedCount = in.readLong();
|
||||
this.name = CircuitBreaker.Name.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -103,12 +95,8 @@ public class CircuitBreakerStats implements Streamable, ToXContent {
|
||||
out.writeLong(limit);
|
||||
out.writeLong(estimated);
|
||||
out.writeDouble(overhead);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeLong(trippedCount);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
CircuitBreaker.Name.writeTo(name, out);
|
||||
}
|
||||
out.writeLong(trippedCount);
|
||||
CircuitBreaker.Name.writeTo(name, out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -94,16 +94,10 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
|
||||
String checksum = in.readOptionalString();
|
||||
content = in.readBytesReference();
|
||||
Version writtenBy = null;
|
||||
if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_0)) {
|
||||
String versionString = in.readOptionalString();
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, null);
|
||||
}
|
||||
String versionString = in.readOptionalString();
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, null);
|
||||
metaData = new StoreFileMetaData(name, length, checksum, writtenBy);
|
||||
if (in.getVersion().onOrAfter(org.elasticsearch.Version.V_1_4_0_Beta1)) {
|
||||
lastChunk = in.readBoolean();
|
||||
} else {
|
||||
lastChunk = false;
|
||||
}
|
||||
lastChunk = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -116,12 +110,8 @@ public final class RecoveryFileChunkRequest extends TransportRequest { // publi
|
||||
out.writeVLong(metaData.length());
|
||||
out.writeOptionalString(metaData.checksum());
|
||||
out.writeBytesReference(content);
|
||||
if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_1_3_0)) {
|
||||
out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
|
||||
}
|
||||
if (out.getVersion().onOrAfter(org.elasticsearch.Version.V_1_4_0_Beta1)) {
|
||||
out.writeBoolean(lastChunk);
|
||||
}
|
||||
out.writeOptionalString(metaData.writtenBy() == null ? null : metaData.writtenBy().toString());
|
||||
out.writeBoolean(lastChunk);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -69,12 +69,7 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
|
||||
int size = in.readVInt();
|
||||
operations = Lists.newArrayListWithExpectedSize(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
operations.add(TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.read(in));
|
||||
} else {
|
||||
operations.add(TranslogStreams.LEGACY_TRANSLOG_STREAM.read(in));
|
||||
}
|
||||
|
||||
operations.add(TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.read(in));
|
||||
}
|
||||
}
|
||||
|
||||
@ -85,11 +80,7 @@ class RecoveryTranslogOperationsRequest extends TransportRequest {
|
||||
shardId.writeTo(out);
|
||||
out.writeVInt(operations.size());
|
||||
for (Translog.Operation operation : operations) {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.write(out, operation);
|
||||
} else {
|
||||
TranslogStreams.LEGACY_TRANSLOG_STREAM.write(out, operation);
|
||||
}
|
||||
TranslogStreams.CHECKSUMMED_TRANSLOG_STREAM.write(out, operation);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -115,9 +115,7 @@ public class StartRecoveryRequest extends TransportRequest {
|
||||
StoreFileMetaData md = StoreFileMetaData.readStoreFileMetaData(in);
|
||||
existingFiles.put(md.name(), md);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_2)) {
|
||||
recoveryType = RecoveryState.Type.fromId(in.readByte());
|
||||
}
|
||||
recoveryType = RecoveryState.Type.fromId(in.readByte());
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -132,8 +130,6 @@ public class StartRecoveryRequest extends TransportRequest {
|
||||
for (StoreFileMetaData md : existingFiles.values()) {
|
||||
md.writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_2)) {
|
||||
out.writeByte(recoveryType.id());
|
||||
}
|
||||
out.writeByte(recoveryType.id());
|
||||
}
|
||||
}
|
||||
|
@ -73,11 +73,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent {
|
||||
final List<DiscoveryNode> nodes = newArrayList();
|
||||
for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
|
||||
DiscoveryNode node = cursor.value;
|
||||
Version version = node.getVersion();
|
||||
// Verification wasn't supported before v1.4.0 - no reason to send verification request to these nodes
|
||||
if (version != null && version.onOrAfter(Version.V_1_4_0)) {
|
||||
nodes.add(node);
|
||||
}
|
||||
nodes.add(node);
|
||||
}
|
||||
final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
|
||||
final AtomicInteger counter = new AtomicInteger(nodes.size());
|
||||
|
@ -149,14 +149,8 @@ public class SearchServiceTransportAction extends AbstractComponent {
|
||||
final boolean freed = searchService.freeContext(contextId);
|
||||
actionListener.onResponse(freed);
|
||||
} else {
|
||||
if (node.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
//use the separate action for scroll when possible
|
||||
transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(request, contextId), new FreeContextResponseHandler(actionListener));
|
||||
} else {
|
||||
//fallback to the previous action name if the new one is not supported by the node we are talking to.
|
||||
//Do use the same request since it has the same binary format as the previous SearchFreeContextRequest (without the OriginalIndices addition).
|
||||
transportService.sendRequest(node, FREE_CONTEXT_ACTION_NAME, new ScrollFreeContextRequest(request, contextId), new FreeContextResponseHandler(actionListener));
|
||||
}
|
||||
//use the separate action for scroll when possible
|
||||
transportService.sendRequest(node, FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextRequest(request, contextId), new FreeContextResponseHandler(actionListener));
|
||||
}
|
||||
}
|
||||
|
||||
@ -432,16 +426,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
|
||||
}
|
||||
|
||||
public void sendExecuteFetchScroll(DiscoveryNode node, final ShardFetchRequest request, final SearchServiceListener<FetchSearchResult> listener) {
|
||||
String action;
|
||||
if (node.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
//use the separate action for scroll when possible
|
||||
action = FETCH_ID_SCROLL_ACTION_NAME;
|
||||
} else {
|
||||
//fallback to the previous action name if the new one is not supported by the node we are talking to.
|
||||
//Do use the same request since it has the same binary format as the previous FetchSearchRequest (without the OriginalIndices addition).
|
||||
action = FETCH_ID_ACTION_NAME;
|
||||
}
|
||||
sendExecuteFetch(node, action, request, listener);
|
||||
sendExecuteFetch(node, FETCH_ID_SCROLL_ACTION_NAME, request, listener);
|
||||
}
|
||||
|
||||
private void sendExecuteFetch(DiscoveryNode node, String action, final ShardFetchRequest request, final SearchServiceListener<FetchSearchResult> listener) {
|
||||
@ -665,19 +650,13 @@ public class SearchServiceTransportAction extends AbstractComponent {
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
freed = in.readBoolean();
|
||||
} else {
|
||||
freed = true;
|
||||
}
|
||||
freed = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
out.writeBoolean(freed);
|
||||
}
|
||||
out.writeBoolean(freed);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -193,9 +193,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St
|
||||
|
||||
public final void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
out.writeGenericValue(metaData);
|
||||
}
|
||||
out.writeGenericValue(metaData);
|
||||
doWriteTo(out);
|
||||
}
|
||||
|
||||
@ -203,9 +201,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St
|
||||
|
||||
public final void readFrom(StreamInput in) throws IOException {
|
||||
name = in.readString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
|
||||
metaData = in.readMap();
|
||||
}
|
||||
metaData = in.readMap();
|
||||
doReadFrom(in);
|
||||
}
|
||||
|
||||
|
@ -200,10 +200,8 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
|
||||
public static EmptyBucketInfo readFrom(StreamInput in) throws IOException {
|
||||
Rounding rounding = Rounding.Streams.read(in);
|
||||
InternalAggregations aggs = InternalAggregations.readAggregations(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
if (in.readBoolean()) {
|
||||
return new EmptyBucketInfo(rounding, aggs, ExtendedBounds.readFrom(in));
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
return new EmptyBucketInfo(rounding, aggs, ExtendedBounds.readFrom(in));
|
||||
}
|
||||
return new EmptyBucketInfo(rounding, aggs);
|
||||
}
|
||||
@ -211,11 +209,9 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
|
||||
public static void writeTo(EmptyBucketInfo info, StreamOutput out) throws IOException {
|
||||
Rounding.Streams.write(info.rounding, out);
|
||||
info.subAggregations.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBoolean(info.bounds != null);
|
||||
if (info.bounds != null) {
|
||||
info.bounds.writeTo(out);
|
||||
}
|
||||
out.writeBoolean(info.bounds != null);
|
||||
if (info.bounds != null) {
|
||||
info.bounds.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -207,9 +207,7 @@ public class SignificantLongTerms extends InternalSignificantTerms {
|
||||
out.writeVLong(minDocCount);
|
||||
out.writeVLong(subsetSize);
|
||||
out.writeVLong(supersetSize);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
significanceHeuristic.writeTo(out);
|
||||
}
|
||||
significanceHeuristic.writeTo(out);
|
||||
out.writeVInt(buckets.size());
|
||||
for (InternalSignificantTerms.Bucket bucket : buckets) {
|
||||
bucket.writeTo(out);
|
||||
|
@ -196,9 +196,7 @@ public class SignificantStringTerms extends InternalSignificantTerms {
|
||||
out.writeVLong(minDocCount);
|
||||
out.writeVLong(subsetSize);
|
||||
out.writeVLong(supersetSize);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
significanceHeuristic.writeTo(out);
|
||||
}
|
||||
significanceHeuristic.writeTo(out);
|
||||
out.writeVInt(buckets.size());
|
||||
for (InternalSignificantTerms.Bucket bucket : buckets) {
|
||||
bucket.writeTo(out);
|
||||
|
@ -34,11 +34,7 @@ public class SignificanceHeuristicStreams {
|
||||
private static ImmutableMap<String, Stream> STREAMS = ImmutableMap.of();
|
||||
|
||||
public static SignificanceHeuristic read(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
return stream(in.readString()).readResult(in);
|
||||
} else {
|
||||
return JLHScore.INSTANCE;
|
||||
}
|
||||
return stream(in.readString()).readResult(in);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -126,7 +126,7 @@ public class DoubleTerms extends InternalTerms {
|
||||
term = in.readDouble();
|
||||
docCount = in.readVLong();
|
||||
docCountError = -1;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
docCountError = in.readLong();
|
||||
}
|
||||
aggregations = InternalAggregations.readAggregations(in);
|
||||
@ -136,7 +136,7 @@ public class DoubleTerms extends InternalTerms {
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeDouble(term);
|
||||
out.writeVLong(getDocCount());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
out.writeLong(docCountError);
|
||||
}
|
||||
aggregations.writeTo(out);
|
||||
@ -188,17 +188,10 @@ public class DoubleTerms extends InternalTerms {
|
||||
this.order = InternalOrder.Streams.readOrder(in);
|
||||
this.formatter = ValueFormatterStreams.readOptional(in);
|
||||
this.requiredSize = readSize(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
} else {
|
||||
this.shardSize = requiredSize;
|
||||
this.showTermDocCountError = false;
|
||||
}
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
this.minDocCount = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
this.otherDocCount = in.readVLong();
|
||||
}
|
||||
this.otherDocCount = in.readVLong();
|
||||
int size = in.readVInt();
|
||||
List<InternalTerms.Bucket> buckets = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
@ -218,14 +211,10 @@ public class DoubleTerms extends InternalTerms {
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
ValueFormatterStreams.writeOptional(formatter, out);
|
||||
writeSize(requiredSize, out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
}
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
out.writeVLong(minDocCount);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
out.writeVLong(otherDocCount);
|
||||
}
|
||||
out.writeVLong(otherDocCount);
|
||||
out.writeVInt(buckets.size());
|
||||
for (InternalTerms.Bucket bucket : buckets) {
|
||||
bucket.writeTo(out);
|
||||
|
@ -292,18 +292,7 @@ class InternalOrder extends Terms.Order {
|
||||
Aggregation aggregationOrder = (Aggregation) order;
|
||||
out.writeBoolean(((MultiBucketsAggregation.Bucket.SubAggregationComparator) aggregationOrder.comparator).asc());
|
||||
AggregationPath path = ((Aggregation) order).path();
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeString(path.toString());
|
||||
} else {
|
||||
// prev versions only supported sorting on a single level -> a single token;
|
||||
AggregationPath.PathElement token = path.lastPathElement();
|
||||
out.writeString(token.name);
|
||||
boolean hasValueName = token.key != null;
|
||||
out.writeBoolean(hasValueName);
|
||||
if (hasValueName) {
|
||||
out.writeString(token.key);
|
||||
}
|
||||
}
|
||||
out.writeString(path.toString());
|
||||
} else if (order instanceof CompoundOrder) {
|
||||
CompoundOrder compoundOrder = (CompoundOrder) order;
|
||||
out.writeByte(order.id());
|
||||
@ -330,15 +319,7 @@ class InternalOrder extends Terms.Order {
|
||||
case Aggregation.ID:
|
||||
boolean asc = in.readBoolean();
|
||||
String key = in.readString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
return new InternalOrder.Aggregation(key, asc);
|
||||
}
|
||||
boolean hasValueNmae = in.readBoolean();
|
||||
if (hasValueNmae) {
|
||||
return new InternalOrder.Aggregation(key + "." + in.readString(), asc);
|
||||
}
|
||||
Terms.Order order = new InternalOrder.Aggregation(key, asc);
|
||||
return absoluteOrder ? new CompoundOrder(Collections.singletonList(order)) : order;
|
||||
return new InternalOrder.Aggregation(key, asc);
|
||||
case CompoundOrder.ID:
|
||||
int size = in.readVInt();
|
||||
List<Terms.Order> compoundOrder = new ArrayList<>(size);
|
||||
|
@ -126,7 +126,7 @@ public class LongTerms extends InternalTerms {
|
||||
term = in.readLong();
|
||||
docCount = in.readVLong();
|
||||
docCountError = -1;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
docCountError = in.readLong();
|
||||
}
|
||||
aggregations = InternalAggregations.readAggregations(in);
|
||||
@ -136,7 +136,7 @@ public class LongTerms extends InternalTerms {
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeLong(term);
|
||||
out.writeVLong(getDocCount());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
out.writeLong(docCountError);
|
||||
}
|
||||
aggregations.writeTo(out);
|
||||
@ -180,25 +180,14 @@ public class LongTerms extends InternalTerms {
|
||||
|
||||
@Override
|
||||
protected void doReadFrom(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.docCountError = in.readLong();
|
||||
} else {
|
||||
this.docCountError = -1;
|
||||
}
|
||||
this.docCountError = in.readLong();
|
||||
this.order = InternalOrder.Streams.readOrder(in);
|
||||
this.formatter = ValueFormatterStreams.readOptional(in);
|
||||
this.requiredSize = readSize(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
} else {
|
||||
this.shardSize = requiredSize;
|
||||
this.showTermDocCountError = false;
|
||||
}
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
this.minDocCount = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
this.otherDocCount = in.readVLong();
|
||||
}
|
||||
this.otherDocCount = in.readVLong();
|
||||
int size = in.readVInt();
|
||||
List<InternalTerms.Bucket> buckets = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
@ -212,20 +201,14 @@ public class LongTerms extends InternalTerms {
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeLong(docCountError);
|
||||
}
|
||||
out.writeLong(docCountError);
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
ValueFormatterStreams.writeOptional(formatter, out);
|
||||
writeSize(requiredSize, out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
}
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
out.writeVLong(minDocCount);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
out.writeVLong(otherDocCount);
|
||||
}
|
||||
out.writeVLong(otherDocCount);
|
||||
out.writeVInt(buckets.size());
|
||||
for (InternalTerms.Bucket bucket : buckets) {
|
||||
bucket.writeTo(out);
|
||||
|
@ -127,7 +127,7 @@ public class StringTerms extends InternalTerms {
|
||||
termBytes = in.readBytesRef();
|
||||
docCount = in.readVLong();
|
||||
docCountError = -1;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
docCountError = in.readLong();
|
||||
}
|
||||
aggregations = InternalAggregations.readAggregations(in);
|
||||
@ -137,7 +137,7 @@ public class StringTerms extends InternalTerms {
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeBytesRef(termBytes);
|
||||
out.writeVLong(getDocCount());
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && showDocCountError) {
|
||||
if (showDocCountError) {
|
||||
out.writeLong(docCountError);
|
||||
}
|
||||
aggregations.writeTo(out);
|
||||
@ -175,24 +175,13 @@ public class StringTerms extends InternalTerms {
|
||||
|
||||
@Override
|
||||
protected void doReadFrom(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.docCountError = in.readLong();
|
||||
} else {
|
||||
this.docCountError = -1;
|
||||
}
|
||||
this.docCountError = in.readLong();
|
||||
this.order = InternalOrder.Streams.readOrder(in);
|
||||
this.requiredSize = readSize(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
} else {
|
||||
this.shardSize = requiredSize;
|
||||
this.showTermDocCountError = false;
|
||||
}
|
||||
this.shardSize = readSize(in);
|
||||
this.showTermDocCountError = in.readBoolean();
|
||||
this.minDocCount = in.readVLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
this.otherDocCount = in.readVLong();
|
||||
}
|
||||
this.otherDocCount = in.readVLong();
|
||||
int size = in.readVInt();
|
||||
List<InternalTerms.Bucket> buckets = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
@ -206,19 +195,13 @@ public class StringTerms extends InternalTerms {
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeLong(docCountError);
|
||||
}
|
||||
out.writeLong(docCountError);
|
||||
InternalOrder.Streams.writeOrder(order, out);
|
||||
writeSize(requiredSize, out);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
}
|
||||
writeSize(shardSize, out);
|
||||
out.writeBoolean(showTermDocCountError);
|
||||
out.writeVLong(minDocCount);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0)) {
|
||||
out.writeVLong(otherDocCount);
|
||||
}
|
||||
out.writeVLong(otherDocCount);
|
||||
out.writeVInt(buckets.size());
|
||||
for (InternalTerms.Bucket bucket : buckets) {
|
||||
bucket.writeTo(out);
|
||||
|
@ -90,15 +90,13 @@ public class ShardFetchRequest extends TransportRequest {
|
||||
for (int i = 0; i < size; i++) {
|
||||
docIds[i] = in.readVInt();
|
||||
}
|
||||
if (in.getVersion().onOrAfter(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
byte flag = in.readByte();
|
||||
if (flag == 1) {
|
||||
lastEmittedDoc = Lucene.readFieldDoc(in);
|
||||
} else if (flag == 2) {
|
||||
lastEmittedDoc = Lucene.readScoreDoc(in);
|
||||
} else if (flag != 0) {
|
||||
throw new IOException("Unknown flag: " + flag);
|
||||
}
|
||||
byte flag = in.readByte();
|
||||
if (flag == 1) {
|
||||
lastEmittedDoc = Lucene.readFieldDoc(in);
|
||||
} else if (flag == 2) {
|
||||
lastEmittedDoc = Lucene.readScoreDoc(in);
|
||||
} else if (flag != 0) {
|
||||
throw new IOException("Unknown flag: " + flag);
|
||||
}
|
||||
}
|
||||
|
||||
@ -110,16 +108,14 @@ public class ShardFetchRequest extends TransportRequest {
|
||||
for (int i = 0; i < size; i++) {
|
||||
out.writeVInt(docIds[i]);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
if (lastEmittedDoc == null) {
|
||||
out.writeByte((byte) 0);
|
||||
} else if (lastEmittedDoc instanceof FieldDoc) {
|
||||
out.writeByte((byte) 1);
|
||||
Lucene.writeFieldDoc(out, (FieldDoc) lastEmittedDoc);
|
||||
} else {
|
||||
out.writeByte((byte) 2);
|
||||
Lucene.writeScoreDoc(out, lastEmittedDoc);
|
||||
}
|
||||
if (lastEmittedDoc == null) {
|
||||
out.writeByte((byte) 0);
|
||||
} else if (lastEmittedDoc instanceof FieldDoc) {
|
||||
out.writeByte((byte) 1);
|
||||
Lucene.writeFieldDoc(out, (FieldDoc) lastEmittedDoc);
|
||||
} else {
|
||||
out.writeByte((byte) 2);
|
||||
Lucene.writeScoreDoc(out, lastEmittedDoc);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -177,9 +177,7 @@ public class FetchSourceContext implements Streamable {
|
||||
fetchSource = in.readBoolean();
|
||||
includes = in.readStringArray();
|
||||
excludes = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
transformSource = in.readBoolean();
|
||||
}
|
||||
transformSource = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -187,9 +185,7 @@ public class FetchSourceContext implements Streamable {
|
||||
out.writeBoolean(fetchSource);
|
||||
out.writeStringArray(includes);
|
||||
out.writeStringArray(excludes);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
out.writeBoolean(transformSource);
|
||||
}
|
||||
out.writeBoolean(transformSource);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -113,9 +113,7 @@ public class InternalSearchResponse implements Streamable, ToXContent {
|
||||
}
|
||||
timedOut = in.readBoolean();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminatedEarly = in.readOptionalBoolean();
|
||||
}
|
||||
terminatedEarly = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -135,9 +133,6 @@ public class InternalSearchResponse implements Streamable, ToXContent {
|
||||
}
|
||||
out.writeBoolean(timedOut);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(terminatedEarly);
|
||||
|
||||
}
|
||||
out.writeOptionalBoolean(terminatedEarly);
|
||||
}
|
||||
}
|
||||
|
@ -229,26 +229,14 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
|
||||
filteringAliases = in.readStringArray();
|
||||
nowInMillis = in.readVLong();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
templateSource = in.readBytesReference();
|
||||
templateName = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
templateType = ScriptService.ScriptType.readFrom(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
templateParams = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
}
|
||||
if (in.getVersion().onOrAfter(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
useSlowScroll = in.readBoolean();
|
||||
} else {
|
||||
// This means that this request was send from a 1.0.x or 1.1.x node and we need to fallback to slow scroll.
|
||||
useSlowScroll = in.getVersion().before(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION);
|
||||
}
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readOptionalBoolean();
|
||||
templateSource = in.readBytesReference();
|
||||
templateName = in.readOptionalString();
|
||||
templateType = ScriptService.ScriptType.readFrom(in);
|
||||
if (in.readBoolean()) {
|
||||
templateParams = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
useSlowScroll = in.readBoolean();
|
||||
queryCache = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException {
|
||||
@ -272,25 +260,16 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
|
||||
out.writeVLong(nowInMillis);
|
||||
}
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
|
||||
out.writeBytesReference(templateSource);
|
||||
out.writeOptionalString(templateName);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_3_0)) {
|
||||
ScriptService.ScriptType.writeTo(templateType, out);
|
||||
}
|
||||
boolean existTemplateParams = templateParams != null;
|
||||
out.writeBoolean(existTemplateParams);
|
||||
if (existTemplateParams) {
|
||||
out.writeGenericValue(templateParams);
|
||||
}
|
||||
}
|
||||
if (out.getVersion().onOrAfter(ParsedScrollId.SCROLL_SEARCH_AFTER_MINIMUM_VERSION)) {
|
||||
out.writeBoolean(useSlowScroll);
|
||||
}
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(queryCache);
|
||||
out.writeBytesReference(templateSource);
|
||||
out.writeOptionalString(templateName);
|
||||
ScriptService.ScriptType.writeTo(templateType, out);
|
||||
boolean existTemplateParams = templateParams != null;
|
||||
out.writeBoolean(existTemplateParams);
|
||||
if (existTemplateParams) {
|
||||
out.writeGenericValue(templateParams);
|
||||
}
|
||||
out.writeBoolean(useSlowScroll);
|
||||
out.writeOptionalBoolean(queryCache);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.search.internal;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
@ -164,11 +163,6 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
|
||||
super.readFrom(in);
|
||||
shardSearchLocalRequest = new ShardSearchLocalRequest();
|
||||
shardSearchLocalRequest.innerReadFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && in.getVersion().before(Version.V_1_4_0)) {
|
||||
//original indices used to be optional in 1.4.0.Beta1 but ended up being empty only when the
|
||||
// shard search request was used locally and never serialized
|
||||
in.readBoolean();
|
||||
}
|
||||
originalIndices = OriginalIndices.readOriginalIndices(in);
|
||||
}
|
||||
|
||||
@ -176,11 +170,6 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
shardSearchLocalRequest.innerWriteTo(out, false);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1) && out.getVersion().before(Version.V_1_4_0)) {
|
||||
//original indices used to be optional in 1.4.0.Beta1 although ended up being empty only when the
|
||||
//shard search request was used locally and never serialized
|
||||
out.writeBoolean(true);
|
||||
}
|
||||
OriginalIndices.writeOriginalIndices(originalIndices, out);
|
||||
}
|
||||
|
||||
|
@ -164,9 +164,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
|
||||
suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in);
|
||||
}
|
||||
searchTimedOut = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
terminatedEarly = in.readOptionalBoolean();
|
||||
}
|
||||
terminatedEarly = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -194,8 +192,6 @@ public class QuerySearchResult extends QuerySearchResultProvider {
|
||||
suggest.writeTo(out);
|
||||
}
|
||||
out.writeBoolean(searchTimedOut);
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(terminatedEarly);
|
||||
}
|
||||
out.writeOptionalBoolean(terminatedEarly);
|
||||
}
|
||||
}
|
||||
|
@ -588,10 +588,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
|
||||
text = in.readText();
|
||||
score = in.readFloat();
|
||||
highlighted = in.readOptionalText();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
collateMatch = in.readOptionalBoolean();
|
||||
}
|
||||
collateMatch = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -599,10 +596,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
|
||||
out.writeText(text);
|
||||
out.writeFloat(score);
|
||||
out.writeOptionalText(highlighted);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(collateMatch);
|
||||
}
|
||||
out.writeOptionalBoolean(collateMatch);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -110,9 +110,7 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom {
|
||||
source = in.readBytesReference();
|
||||
}
|
||||
Boolean queryCache = null;
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
queryCache = in.readOptionalBoolean();
|
||||
}
|
||||
queryCache = in.readOptionalBoolean();
|
||||
entries[i] = new Entry(name, types, queryCache, source);
|
||||
}
|
||||
return new IndexWarmersMetaData(entries);
|
||||
@ -130,9 +128,7 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom {
|
||||
out.writeBoolean(true);
|
||||
out.writeBytesReference(entry.source());
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
out.writeOptionalBoolean(entry.queryCache());
|
||||
}
|
||||
out.writeOptionalBoolean(entry.queryCache());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1198,7 +1198,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
||||
ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
|
||||
if (primary == null || !primary.assignedToNode()) {
|
||||
builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
|
||||
} else if (clusterState.getNodes().smallestVersion().onOrAfter(Version.V_1_2_0) && (primary.relocating() || primary.initializing())) {
|
||||
} else if (primary.relocating() || primary.initializing()) {
|
||||
// The WAITING state was introduced in V1.2.0 - don't use it if there are nodes with older version in the cluster
|
||||
builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING));
|
||||
} else if (!primary.started()) {
|
||||
|
@ -51,13 +51,8 @@ public class OriginalIndicesTests extends ElasticsearchTestCase {
|
||||
in.setVersion(out.getVersion());
|
||||
OriginalIndices originalIndices2 = OriginalIndices.readOriginalIndices(in);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
assertThat(originalIndices2.indices(), equalTo(originalIndices.indices()));
|
||||
assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions()));
|
||||
} else {
|
||||
assertThat(originalIndices2.indices(), nullValue());
|
||||
assertThat(originalIndices2.indicesOptions(), nullValue());
|
||||
}
|
||||
assertThat(originalIndices2.indices(), equalTo(originalIndices.indices()));
|
||||
assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions()));
|
||||
}
|
||||
}
|
||||
|
||||
@ -81,13 +76,8 @@ public class OriginalIndicesTests extends ElasticsearchTestCase {
|
||||
in.setVersion(out.getVersion());
|
||||
OriginalIndices originalIndices2 = OriginalIndices.readOptionalOriginalIndices(in);
|
||||
|
||||
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
assertThat(originalIndices2.indices(), equalTo(originalIndices.indices()));
|
||||
assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions()));
|
||||
} else {
|
||||
assertThat(originalIndices2.indices(), nullValue());
|
||||
assertThat(originalIndices2.indicesOptions(), nullValue());
|
||||
}
|
||||
assertThat(originalIndices2.indices(), equalTo(originalIndices.indices()));
|
||||
assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -82,21 +82,12 @@ public class MultiGetShardRequestTests extends ElasticsearchTestCase {
|
||||
assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
|
||||
assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
|
||||
assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
|
||||
} else {
|
||||
assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(false));
|
||||
}
|
||||
assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
|
||||
assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
|
||||
for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
|
||||
MultiGetRequest.Item item = multiGetShardRequest.items.get(i);
|
||||
MultiGetRequest.Item item2 = multiGetShardRequest2.items.get(i);
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
assertThat(item2.index(), equalTo(item.index()));
|
||||
} else {
|
||||
//before 1.4 we have only one index, the concrete one
|
||||
assertThat(item2.index(), equalTo(multiGetShardRequest.index()));
|
||||
}
|
||||
assertThat(item2.type(), equalTo(item.type()));
|
||||
assertThat(item2.id(), equalTo(item.id()));
|
||||
assertThat(item2.fields(), equalTo(item.fields()));
|
||||
@ -104,10 +95,7 @@ public class MultiGetShardRequestTests extends ElasticsearchTestCase {
|
||||
assertThat(item2.versionType(), equalTo(item.versionType()));
|
||||
assertThat(item2.fetchSourceContext(), equalTo(item.fetchSourceContext()));
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
|
||||
//we don't serialize the original index before 1.4, it'll get the concrete one
|
||||
assertThat(multiGetShardRequest2.indices(), equalTo(multiGetShardRequest.indices()));
|
||||
assertThat(multiGetShardRequest2.indicesOptions(), equalTo(multiGetShardRequest.indicesOptions()));
|
||||
}
|
||||
assertThat(multiGetShardRequest2.indices(), equalTo(multiGetShardRequest.indices()));
|
||||
assertThat(multiGetShardRequest2.indicesOptions(), equalTo(multiGetShardRequest.indicesOptions()));
|
||||
}
|
||||
}
|
||||
|
@ -147,11 +147,7 @@ public class MoreLikeThisRequestTests extends ElasticsearchTestCase {
|
||||
} else {
|
||||
assertThat(mltRequest2.fields(), equalTo(mltRequest.fields()));
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_1_2_0)) {
|
||||
assertThat(mltRequest2.include(), equalTo(mltRequest.include()));
|
||||
} else {
|
||||
assertThat(mltRequest2.include(), is(false));
|
||||
}
|
||||
assertThat(mltRequest2.include(), equalTo(mltRequest.include()));
|
||||
}
|
||||
|
||||
private static String[] randomStrings(int max) {
|
||||
|
@ -49,17 +49,8 @@ public class IndicesOptionsTests extends ElasticsearchTestCase {
|
||||
assertThat(indicesOptions2.expandWildcardsOpen(), equalTo(indicesOptions.expandWildcardsOpen()));
|
||||
assertThat(indicesOptions2.expandWildcardsClosed(), equalTo(indicesOptions.expandWildcardsClosed()));
|
||||
|
||||
if (outputVersion.onOrAfter(Version.V_1_2_2)) {
|
||||
assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices()));
|
||||
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices()));
|
||||
} else if (outputVersion.onOrAfter(Version.V_1_2_0)) {
|
||||
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices()));
|
||||
assertThat(indicesOptions2.forbidClosedIndices(), equalTo(false));
|
||||
} else {
|
||||
//default value (true) if the node version doesn't support the allowAliasesToMultipleIndices flag
|
||||
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(true));
|
||||
assertThat(indicesOptions2.forbidClosedIndices(), equalTo(false));
|
||||
}
|
||||
assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices()));
|
||||
assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices()));
|
||||
}
|
||||
}
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user