Convert several direct uses of Streamable to Writeable (#44586) (#44604)

This commit converts several utility classes that implement Streamable
to have StreamInput constructors. It also adds a default implementation of
readFrom to Streamable so that classes no longer need to override it just
to throw UnsupportedOperationException.

relates #34389
Ryan Ernst 2019-07-18 21:25:44 -07:00, committed via GitHub
parent 336364fefe
commit 60785a9fa8
29 changed files with 119 additions and 251 deletions
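
Every file below follows the same mechanical pattern: the no-arg constructor plus readFrom(StreamInput) pair is replaced by a single constructor that takes the StreamInput, and callers switch from the static read helpers to that constructor. A minimal before/after sketch of the pattern (hypothetical Example class, not part of this commit):

// Before: Streamable fills mutable fields in readFrom.
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;

public class Example implements Streamable {
    private String name;

    Example() {}

    @Override
    public void readFrom(StreamInput in) throws IOException {
        name = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
    }
}

// After: Writeable reads state in the constructor, so fields can become final.
import org.elasticsearch.common.io.stream.Writeable;

public class Example implements Writeable {
    private final String name;

    Example(StreamInput in) throws IOException {
        name = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
    }
}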

View File

@@ -106,9 +106,7 @@ public class PendingClusterTasksResponse extends ActionResponse implements Itera
         int size = in.readVInt();
         pendingTasks = new ArrayList<>(size);
         for (int i = 0; i < size; i++) {
-            PendingClusterTask task = new PendingClusterTask();
-            task.readFrom(in);
-            pendingTasks.add(task);
+            pendingTasks.add(new PendingClusterTask(in));
         }
     }

View File

@@ -259,7 +259,7 @@ public class CommonStats implements Writeable, ToXContentFragment {
         out.writeOptionalStreamable(warmer);
         out.writeOptionalStreamable(queryCache);
         out.writeOptionalStreamable(fieldData);
-        out.writeOptionalStreamable(completion);
+        out.writeOptionalWriteable(completion);
         out.writeOptionalStreamable(segments);
         out.writeOptionalStreamable(translog);
         out.writeOptionalStreamable(requestCache);
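
writeOptionalWriteable keeps the same wire format as writeOptionalStreamable (a boolean presence flag followed by the value), so only the read side has to change along with it. A hedged sketch of the matching read path, which this hunk does not show (CompletionStats already implements Writeable, so presumably has a StreamInput constructor; see its hunk later in this commit):

completion = in.readOptionalWriteable(CompletionStats::new);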

View File

@@ -23,19 +23,23 @@ import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import java.io.IOException;
 import java.util.Objects;
-public class BulkItemRequest implements Streamable {
+public class BulkItemRequest implements Writeable {
     private int id;
     private DocWriteRequest<?> request;
     private volatile BulkItemResponse primaryResponse;
-    BulkItemRequest() {
+    BulkItemRequest(StreamInput in) throws IOException {
+        id = in.readVInt();
+        request = DocWriteRequest.readDocumentRequest(in);
+        if (in.readBoolean()) {
+            primaryResponse = new BulkItemResponse(in);
+        }
     }
     // NOTE: public for testing only
@@ -89,25 +93,10 @@ public class BulkItemRequest implements Streamable {
         }
     }
-    public static BulkItemRequest readBulkItem(StreamInput in) throws IOException {
-        BulkItemRequest item = new BulkItemRequest();
-        item.readFrom(in);
-        return item;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        id = in.readVInt();
-        request = DocWriteRequest.readDocumentRequest(in);
-        if (in.readBoolean()) {
-            primaryResponse = BulkItemResponse.readBulkItem(in);
-        }
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(id);
         DocWriteRequest.writeDocumentRequest(out, request);
-        out.writeOptionalStreamable(primaryResponse);
+        out.writeOptionalWriteable(primaryResponse);
     }
 }

View File

@@ -32,7 +32,6 @@ import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.StatusToXContentObject;
@@ -53,7 +52,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknown
  * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
  * of the relevant action, and if it has failed or not (with the failure message incase it failed).
  */
-public class BulkItemResponse implements Streamable, StatusToXContentObject {
+public class BulkItemResponse implements Writeable, StatusToXContentObject {
     private static final String _INDEX = "_index";
     private static final String _TYPE = "_type";
@@ -361,8 +360,24 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
     private Failure failure;
-    BulkItemResponse() {
+    BulkItemResponse() {}
+    BulkItemResponse(StreamInput in) throws IOException {
+        id = in.readVInt();
+        opType = OpType.fromId(in.readByte());
+        byte type = in.readByte();
+        if (type == 0) {
+            response = new IndexResponse(in);
+        } else if (type == 1) {
+            response = new DeleteResponse(in);
+        } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses'
+            response = new UpdateResponse(in);
+        }
+        if (in.readBoolean()) {
+            failure = new Failure(in);
+        }
+    }
     public BulkItemResponse(int id, OpType opType, DocWriteResponse response) {
@@ -463,31 +478,6 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
         return this.failure;
     }
-    public static BulkItemResponse readBulkItem(StreamInput in) throws IOException {
-        BulkItemResponse response = new BulkItemResponse();
-        response.readFrom(in);
-        return response;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        id = in.readVInt();
-        opType = OpType.fromId(in.readByte());
-        byte type = in.readByte();
-        if (type == 0) {
-            response = new IndexResponse(in);
-        } else if (type == 1) {
-            response = new DeleteResponse(in);
-        } else if (type == 3) { // make 3 instead of 2, because 2 is already in use for 'no responses'
-            response = new UpdateResponse(in);
-        }
-        if (in.readBoolean()) {
-            failure = new Failure(in);
-        }
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(id);
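
The hunk cuts off just after writeTo begins, so the write side of the type dispatch is not shown. Presumably it mirrors the read side above, with 2 reserved for "no response"; a hedged sketch of that dispatch only (not from this commit):

// Assumed write-side counterpart of the read-side dispatch, with 2 meaning "no response":
if (response == null) {
    out.writeByte((byte) 2);
} else {
    if (response instanceof IndexResponse) {
        out.writeByte((byte) 0);
    } else if (response instanceof DeleteResponse) {
        out.writeByte((byte) 1);
    } else if (response instanceof UpdateResponse) {
        out.writeByte((byte) 3);
    }
    response.writeTo(out);
}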

View File

@@ -62,7 +62,7 @@ public class BulkResponse extends ActionResponse implements Iterable<BulkItemRes
         super(in);
         responses = new BulkItemResponse[in.readVInt()];
         for (int i = 0; i < responses.length; i++) {
-            responses[i] = BulkItemResponse.readBulkItem(in);
+            responses[i] = new BulkItemResponse(in);
         }
         tookInMillis = in.readVLong();
         ingestTookInMillis = in.readZLong();

View File

@@ -38,7 +38,7 @@ public class BulkShardRequest extends ReplicatedWriteRequest<BulkShardRequest> {
         items = new BulkItemRequest[in.readVInt()];
         for (int i = 0; i < items.length; i++) {
             if (in.readBoolean()) {
-                items[i] = BulkItemRequest.readBulkItem(in);
+                items[i] = new BulkItemRequest(in);
             }
         }
     }

View File

@@ -38,7 +38,7 @@ public class BulkShardResponse extends ReplicationResponse implements WriteRespo
         shardId = new ShardId(in);
         responses = new BulkItemResponse[in.readVInt()];
         for (int i = 0; i < responses.length; i++) {
-            responses[i] = BulkItemResponse.readBulkItem(in);
+            responses[i] = new BulkItemResponse(in);
         }
     }

View File

@@ -23,7 +23,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -35,7 +34,7 @@ import java.util.EnumSet;
 import java.util.Locale;
 import java.util.Objects;
-public class ClusterBlock implements Streamable, Writeable, ToXContentFragment {
+public class ClusterBlock implements Writeable, ToXContentFragment {
     private int id;
     private @Nullable String uuid;
@@ -147,11 +146,6 @@ public class ClusterBlock implements Streamable, Writeable, ToXContentFragment {
         return builder;
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(id);

View File

@@ -22,13 +22,13 @@ package org.elasticsearch.cluster.service;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.unit.TimeValue;
 import java.io.IOException;
-public class PendingClusterTask implements Streamable {
+public class PendingClusterTask implements Writeable {
     private long insertOrder;
     private Priority priority;
@@ -36,7 +36,12 @@ public class PendingClusterTask implements Streamable {
     private long timeInQueue;
     private boolean executing;
-    public PendingClusterTask() {
+    public PendingClusterTask(StreamInput in) throws IOException {
+        insertOrder = in.readVLong();
+        priority = Priority.readFrom(in);
+        source = in.readText();
+        timeInQueue = in.readLong();
+        executing = in.readBoolean();
     }
     public PendingClusterTask(long insertOrder, Priority priority, Text source, long timeInQueue, boolean executing) {
@@ -73,15 +78,6 @@ public class PendingClusterTask implements Streamable {
         return executing;
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        insertOrder = in.readVLong();
-        priority = Priority.readFrom(in);
-        source = in.readText();
-        timeInQueue = in.readLong();
-        executing = in.readBoolean();
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(insertOrder);

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.common.document;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -44,12 +44,18 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsV
  * @see SearchHit
  * @see GetResult
  */
-public class DocumentField implements Streamable, ToXContentFragment, Iterable<Object> {
+public class DocumentField implements Writeable, ToXContentFragment, Iterable<Object> {
     private String name;
    private List<Object> values;
-    private DocumentField() {
+    public DocumentField(StreamInput in) throws IOException {
+        name = in.readString();
+        int size = in.readVInt();
+        values = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+            values.add(in.readGenericValue());
+        }
     }
     public DocumentField(String name, List<Object> values) {
@@ -93,22 +99,6 @@ public class DocumentField implements Streamable, ToXContentFragment, Iterable<O
         return values.iterator();
     }
-    public static DocumentField readDocumentField(StreamInput in) throws IOException {
-        DocumentField result = new DocumentField();
-        result.readFrom(in);
-        return result;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        name = in.readString();
-        int size = in.readVInt();
-        values = new ArrayList<>(size);
-        for (int i = 0; i < size; i++) {
-            values.add(in.readGenericValue());
-        }
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);

View File

@@ -38,7 +38,9 @@ public interface Streamable {
     /**
      * Set this object's fields from a {@linkplain StreamInput}.
      */
-    void readFrom(StreamInput in) throws IOException;
+    default void readFrom(StreamInput in) throws IOException {
+        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
+    }
     /**
      * Write this object's fields to a {@linkplain StreamOutput}.
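
This default is what lets classes mid-migration drop their boilerplate: implementations of both interfaces, such as ClusterBlock above and CompletionStats and WatchStatus below, previously had to override readFrom only to throw, and now can omit it entirely. A sketch (hypothetical class, imports as in the hunks above; not from this commit):

// readFrom is now inherited from Streamable and throws UnsupportedOperationException;
// only writeTo has to be implemented.
public class Partial implements Streamable, Writeable {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // write fields here
    }
}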

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.common.transport;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import java.io.IOException;
@@ -32,13 +32,19 @@ import java.io.IOException;
 *
 *
 */
-public class BoundTransportAddress implements Streamable {
+public class BoundTransportAddress implements Writeable {
     private TransportAddress[] boundAddresses;
     private TransportAddress publishAddress;
-    BoundTransportAddress() {
+    public BoundTransportAddress(StreamInput in) throws IOException {
+        int boundAddressLength = in.readInt();
+        boundAddresses = new TransportAddress[boundAddressLength];
+        for (int i = 0; i < boundAddressLength; i++) {
+            boundAddresses[i] = new TransportAddress(in);
+        }
+        publishAddress = new TransportAddress(in);
     }
     public BoundTransportAddress(TransportAddress[] boundAddresses, TransportAddress publishAddress) {
@@ -57,22 +63,6 @@ public class BoundTransportAddress implements Streamable {
         return publishAddress;
     }
-    public static BoundTransportAddress readBoundTransportAddress(StreamInput in) throws IOException {
-        BoundTransportAddress addr = new BoundTransportAddress();
-        addr.readFrom(in);
-        return addr;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        int boundAddressLength = in.readInt();
-        boundAddresses = new TransportAddress[boundAddressLength];
-        for (int i = 0; i < boundAddressLength; i++) {
-            boundAddresses[i] = new TransportAddress(in);
-        }
-        publishAddress = new TransportAddress(in);
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeInt(boundAddresses.length);

View File

@@ -48,7 +48,7 @@ public class HttpInfo implements Writeable, ToXContentFragment {
     private final boolean cnameInPublishHost;
     public HttpInfo(StreamInput in) throws IOException {
-        this(BoundTransportAddress.readBoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST);
+        this(new BoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST);
     }
     public HttpInfo(BoundTransportAddress address, long maxContentLength) {

View File

@@ -390,7 +390,7 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
         } else {
             fields = new HashMap<>(size);
             for (int i = 0; i < size; i++) {
-                DocumentField field = DocumentField.readDocumentField(in);
+                DocumentField field = new DocumentField(in);
                 fields.put(field.getName(), field);
             }
         }

View File

@@ -19,13 +19,7 @@
 package org.elasticsearch.repositories;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-import java.io.IOException;
-public class VerificationFailure implements Streamable {
+public class VerificationFailure {
     private String nodeId;
@@ -48,24 +42,6 @@ public class VerificationFailure implements Streamable {
         return cause;
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        nodeId = in.readOptionalString();
-        cause = in.readException();
-    }
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeOptionalString(nodeId);
-        out.writeException(cause);
-    }
-    public static VerificationFailure readNode(StreamInput in) throws IOException {
-        VerificationFailure failure = new VerificationFailure();
-        failure.readFrom(in);
-        return failure;
-    }
     @Override
     public String toString() {
         return "[" + nodeId + ", '" + cause + "']";

View File

@@ -72,7 +72,6 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optiona
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
 import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureFieldName;
 import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue;
-import static org.elasticsearch.search.fetch.subphase.highlight.HighlightField.readHighlightField;
 /**
  * A single search hit.
@@ -163,12 +162,12 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
         if (size == 0) {
             fields = emptyMap();
         } else if (size == 1) {
-            DocumentField hitField = DocumentField.readDocumentField(in);
+            DocumentField hitField = new DocumentField(in);
             fields = singletonMap(hitField.getName(), hitField);
         } else {
             Map<String, DocumentField> fields = new HashMap<>();
             for (int i = 0; i < size; i++) {
-                DocumentField hitField = DocumentField.readDocumentField(in);
+                DocumentField hitField = new DocumentField(in);
                 fields.put(hitField.getName(), hitField);
             }
             this.fields = unmodifiableMap(fields);
@@ -178,12 +177,12 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
         if (size == 0) {
             highlightFields = emptyMap();
         } else if (size == 1) {
-            HighlightField field = readHighlightField(in);
+            HighlightField field = new HighlightField(in);
             highlightFields = singletonMap(field.name(), field);
         } else {
             Map<String, HighlightField> highlightFields = new HashMap<>();
             for (int i = 0; i < size; i++) {
-                HighlightField field = readHighlightField(in);
+                HighlightField field = new HighlightField(in);
                 highlightFields.put(field.name(), field);
             }
             this.highlightFields = unmodifiableMap(highlightFields);

View File

@@ -28,17 +28,28 @@ import org.apache.lucene.search.TermStatistics;
 import org.elasticsearch.common.collect.HppcMaps;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import java.io.IOException;
-public class AggregatedDfs implements Streamable {
+public class AggregatedDfs implements Writeable {
     private ObjectObjectHashMap<Term, TermStatistics> termStatistics;
     private ObjectObjectHashMap<String, CollectionStatistics> fieldStatistics;
     private long maxDoc;
-    private AggregatedDfs() {
+    public AggregatedDfs(StreamInput in) throws IOException {
+        int size = in.readVInt();
+        termStatistics = HppcMaps.newMap(size);
+        for (int i = 0; i < size; i++) {
+            Term term = new Term(in.readString(), in.readBytesRef());
+            TermStatistics stats = new TermStatistics(in.readBytesRef(),
+                in.readVLong(),
+                DfsSearchResult.subOne(in.readVLong()));
+            termStatistics.put(term, stats);
+        }
+        fieldStatistics = DfsSearchResult.readFieldStats(in);
+        maxDoc = in.readVLong();
     }
     public AggregatedDfs(ObjectObjectHashMap<Term, TermStatistics> termStatistics,
@@ -60,27 +71,6 @@ public class AggregatedDfs implements Streamable {
         return maxDoc;
     }
-    public static AggregatedDfs readAggregatedDfs(StreamInput in) throws IOException {
-        AggregatedDfs result = new AggregatedDfs();
-        result.readFrom(in);
-        return result;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        int size = in.readVInt();
-        termStatistics = HppcMaps.newMap(size);
-        for (int i = 0; i < size; i++) {
-            Term term = new Term(in.readString(), in.readBytesRef());
-            TermStatistics stats = new TermStatistics(in.readBytesRef(),
-                in.readVLong(),
-                DfsSearchResult.subOne(in.readVLong()));
-            termStatistics.put(term, stats);
-        }
-        fieldStatistics = DfsSearchResult.readFieldStats(in);
-        maxDoc = in.readVLong();
-    }
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
         out.writeVInt(termStatistics.size());

View File

@@ -22,7 +22,7 @@ package org.elasticsearch.search.fetch.subphase.highlight;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -39,13 +39,25 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect
 /**
  * A field highlighted with its highlighted fragments.
  */
-public class HighlightField implements ToXContentFragment, Streamable {
+public class HighlightField implements ToXContentFragment, Writeable {
     private String name;
     private Text[] fragments;
-    HighlightField() {
+    public HighlightField(StreamInput in) throws IOException {
+        name = in.readString();
+        if (in.readBoolean()) {
+            int size = in.readVInt();
+            if (size == 0) {
+                fragments = Text.EMPTY_ARRAY;
+            } else {
+                fragments = new Text[size];
+                for (int i = 0; i < size; i++) {
+                    fragments[i] = in.readText();
+                }
+            }
+        }
     }
     public HighlightField(String name, Text[] fragments) {
@@ -86,28 +98,6 @@ public class HighlightField implements ToXContentFragment, Streamable {
         return "[" + name + "], fragments[" + Arrays.toString(fragments) + "]";
     }
-    public static HighlightField readHighlightField(StreamInput in) throws IOException {
-        HighlightField field = new HighlightField();
-        field.readFrom(in);
-        return field;
-    }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        name = in.readString();
-        if (in.readBoolean()) {
-            int size = in.readVInt();
-            if (size == 0) {
-                fragments = Text.EMPTY_ARRAY;
-            } else {
-                fragments = new Text[size];
-                for (int i = 0; i < size; i++) {
-                    fragments[i] = in.readText();
-                }
-            }
-        }
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);

View File

@@ -34,8 +34,6 @@ import org.elasticsearch.transport.TransportRequest;
 import java.io.IOException;
 import java.util.Map;
-import static org.elasticsearch.search.dfs.AggregatedDfs.readAggregatedDfs;
 public class QuerySearchRequest extends TransportRequest implements IndicesRequest {
     private long id;
@@ -56,7 +54,7 @@ public class QuerySearchRequest extends TransportRequest implements IndicesReque
     public QuerySearchRequest(StreamInput in) throws IOException {
         super(in);
         id = in.readLong();
-        dfs = readAggregatedDfs(in);
+        dfs = new AggregatedDfs(in);
         originalIndices = OriginalIndices.readOriginalIndices(in);
     }

View File

@@ -22,7 +22,6 @@ import org.elasticsearch.common.FieldMemoryStats;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
@@ -30,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import java.io.IOException;
-public class CompletionStats implements Streamable, Writeable, ToXContentFragment {
+public class CompletionStats implements Writeable, ToXContentFragment {
     private static final String COMPLETION = "completion";
     private static final String SIZE_IN_BYTES = "size_in_bytes";
@@ -66,11 +65,6 @@ public class CompletionStats implements Streamable, Writeable, ToXContentFragmen
         return fields;
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(sizeInBytes);

View File

@@ -50,9 +50,7 @@ public class RestoreInfo implements ToXContentObject, Writeable {
     private int successfulShards;
-    RestoreInfo() {
-    }
+    RestoreInfo() {}
     public RestoreInfo(String name, List<String> indices, int totalShards, int successfulShards) {
         this.name = name;

View File

@@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.transport.BoundTransportAddress;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -43,13 +42,13 @@ public class TransportInfo implements Writeable, ToXContentFragment {
     }
     public TransportInfo(StreamInput in) throws IOException {
-        address = BoundTransportAddress.readBoundTransportAddress(in);
+        address = new BoundTransportAddress(in);
         int size = in.readVInt();
         if (size > 0) {
             profileAddresses = new HashMap<>(size);
             for (int i = 0; i < size; i++) {
                 String key = in.readString();
-                BoundTransportAddress value = BoundTransportAddress.readBoundTransportAddress(in);
+                BoundTransportAddress value = new BoundTransportAddress(in);
                 profileAddresses.put(key, value);
             }
         }

View File

@@ -52,13 +52,7 @@ public class BoundTransportAddressTests extends ESTestCase {
         transportAddress.writeTo(streamOutput);
         StreamInput in = streamOutput.bytes().streamInput();
-        BoundTransportAddress serializedAddress;
-        if (randomBoolean()) {
-            serializedAddress = BoundTransportAddress.readBoundTransportAddress(in);
-        } else {
-            serializedAddress = new BoundTransportAddress();
-            serializedAddress.readFrom(in);
-        }
+        BoundTransportAddress serializedAddress = new BoundTransportAddress(in);
         assertThat(serializedAddress, not(sameInstance(transportAddress)));
         assertThat(serializedAddress.boundAddresses().length, equalTo(transportAddress.boundAddresses().length));

View File

@@ -114,7 +114,7 @@ public class HighlightFieldTests extends ESTestCase {
         try (BytesStreamOutput output = new BytesStreamOutput()) {
             testField.writeTo(output);
             try (StreamInput in = output.bytes().streamInput()) {
-                HighlightField deserializedCopy = HighlightField.readHighlightField(in);
+                HighlightField deserializedCopy = new HighlightField(in);
                 assertEquals(testField, deserializedCopy);
                 assertEquals(testField.hashCode(), deserializedCopy.hashCode());
                 assertNotSame(testField, deserializedCopy);

View File

@@ -60,9 +60,7 @@ public class TransportReloadAnalyzersAction
     @Override
     protected ReloadResult readShardResult(StreamInput in) throws IOException {
-        ReloadResult reloadResult = new ReloadResult();
-        reloadResult.readFrom(in);
-        return reloadResult;
+        return new ReloadResult(in);
     }
     @Override
@@ -106,11 +104,7 @@ public class TransportReloadAnalyzersAction
             this.reloadedSearchAnalyzers = reloadedSearchAnalyzers;
         }
-        private ReloadResult() {
-        }
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
+        private ReloadResult(StreamInput in) throws IOException {
             this.index = in.readString();
             this.nodeId = in.readString();
             this.reloadedSearchAnalyzers = in.readStringList();

View File

@@ -9,7 +9,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
 import com.carrotsearch.hppc.cursors.ObjectLongCursor;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import java.io.IOException;
 import java.util.HashMap;
@@ -22,10 +22,17 @@ import java.util.Map;
  * Calling toNestedMap() will create a nested map, where each dot of the key name will nest deeper
  * The main reason for this class is that the stats producer should not be worried about how the map is actually nested
 */
-public class Counters implements Streamable {
+public class Counters implements Writeable {
     private ObjectLongHashMap<String> counters = new ObjectLongHashMap<>();
+    public Counters(StreamInput in) throws IOException {
+        int counters = in.readVInt();
+        for (int i = 0; i < counters; i++) {
+            inc(in.readString(), in.readVLong());
+        }
+    }
     public Counters(String ... names) {
         for (String name : names) {
             set(name);
@@ -102,14 +109,6 @@ public class Counters implements Streamable {
         return map;
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        int counters = in.readVInt();
-        for (int i = 0; i < counters; i++) {
-            inc(in.readString(), in.readVLong());
-        }
-    }
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(counters.size());
@@ -119,12 +118,6 @@ public class Counters implements Streamable {
         }
     }
-    public static Counters read(StreamInput in) throws IOException {
-        Counters counters = new Counters();
-        counters.readFrom(in);
-        return counters;
-    }
     public static Counters merge(List<Counters> counters) {
         Counters result = new Counters();
         for (Counters c : counters) {
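
A hedged round-trip sketch for the converted Counters, reusing the BytesStreamOutput pattern from the tests above (method names taken from this hunk; the snippet itself is not part of the commit and would live inside a method that declares IOException):

Counters counters = new Counters("requests"); // set("requests") initializes the counter to 0
counters.inc("requests", 5);
BytesStreamOutput out = new BytesStreamOutput();
counters.writeTo(out);
Counters copy = new Counters(out.bytes().streamInput()); // replaces Counters.read(in)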

View File

@@ -106,7 +106,7 @@ public class WatcherStatsResponse extends BaseNodesResponse<WatcherStatsResponse
             queuedWatches = in.readList(QueuedWatch::new);
         }
         if (in.readBoolean()) {
-            stats = Counters.read(in);
+            stats = new Counters(in);
         }
     }

View File

@@ -10,7 +10,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -37,7 +36,7 @@ import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.
 import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeDate;
 import static org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils.writeOptionalDate;
-public class WatchStatus implements ToXContentObject, Streamable, Writeable {
+public class WatchStatus implements ToXContentObject, Writeable {
     public static final String INCLUDE_STATE = "include_state";
@@ -239,11 +238,6 @@ public class WatchStatus implements ToXContentObject, Streamable, Writeable {
         }
     }
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
-    }
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();

View File

@@ -58,7 +58,7 @@ public class SqlStatsResponse extends BaseNodesResponse<SqlStatsResponse.NodeSta
     public NodeStatsResponse(StreamInput in) throws IOException {
         super(in);
         if (in.readBoolean()) {
-            stats = Counters.read(in);
+            stats = new Counters(in);
         }
     }