Remove unused code and unnecessary abstraction

HashedBytesArray is no longer used, and Releasable only makes sense on the
Paged implementation, so the marker interface is unneeded.
Simon Willnauer 2015-01-29 09:51:14 +01:00
parent 86e52c30a1
commit 4917121de2
8 changed files with 11 additions and 195 deletions
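
For illustration only (not part of the commit): a minimal Java sketch of why the marker interface is redundant. FakeBytes, FakePagedBytes and FakeCaller are hypothetical stand-ins; only Releasable, Releasables and ElasticsearchException are real Elasticsearch types, taken from the diffs below. Once only the paged implementation holds releasable resources, that class can implement Releasable directly and callers release it through Releasables.close, so a separate ReleasableBytesReference-style marker adds nothing.

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;

// Hypothetical stand-in for a read-only bytes abstraction such as BytesReference.
interface FakeBytes {
    int length();
}

// Before this commit there was also a marker interface, roughly:
//   interface ReleasableFakeBytes extends FakeBytes, Releasable {}
// Since only the paged implementation ever holds releasable resources,
// it can simply implement Releasable itself.
class FakePagedBytes implements FakeBytes, Releasable {
    @Override
    public int length() {
        return 0; // no real backing pages in this sketch
    }

    @Override
    public void close() throws ElasticsearchException {
        // the real class releases its underlying ByteArray here,
        // e.g. Releasables.close(bytearray) as shown in the diff below
    }
}

class FakeCaller {
    static void use(FakePagedBytes bytes) {
        try {
            // read from bytes ...
        } finally {
            Releasables.close(bytes); // callers depend on Releasable, not on a marker type
        }
    }
}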

View File

@@ -1,152 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.bytes;
import com.google.common.base.Charsets;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBuffers;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.channels.GatheringByteChannel;
/**
* A bytes array reference that caches the hash code.
*/
public class HashedBytesArray implements BytesReference {
private final byte[] bytes;
/**
* Cache the hash code for the string
*/
private int hash; // Defaults to 0
public HashedBytesArray(byte[] bytes) {
this.bytes = bytes;
}
@Override
public byte get(int index) {
return bytes[index];
}
@Override
public int length() {
return bytes.length;
}
@Override
public BytesReference slice(int from, int length) {
if (from < 0 || (from + length) > bytes.length) {
throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + bytes.length + "], with slice parameters from [" + from + "], length [" + length + "]");
}
return new BytesArray(bytes, from, length);
}
@Override
public StreamInput streamInput() {
return new BytesStreamInput(bytes, false);
}
@Override
public void writeTo(OutputStream os) throws IOException {
os.write(bytes);
}
@Override
public void writeTo(GatheringByteChannel channel) throws IOException {
Channels.writeToChannel(bytes, 0, bytes.length, channel);
}
@Override
public byte[] toBytes() {
return bytes;
}
@Override
public BytesArray toBytesArray() {
return new BytesArray(bytes);
}
@Override
public BytesArray copyBytesArray() {
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return new BytesArray(copy);
}
@Override
public ChannelBuffer toChannelBuffer() {
return ChannelBuffers.wrappedBuffer(bytes, 0, bytes.length);
}
@Override
public boolean hasArray() {
return true;
}
@Override
public byte[] array() {
return bytes;
}
@Override
public int arrayOffset() {
return 0;
}
@Override
public String toUtf8() {
if (bytes.length == 0) {
return "";
}
return new String(bytes, Charsets.UTF_8);
}
@Override
public BytesRef toBytesRef() {
return new BytesRef(bytes);
}
@Override
public BytesRef copyBytesRef() {
byte[] copy = new byte[bytes.length];
System.arraycopy(bytes, 0, copy, 0, bytes.length);
return new BytesRef(copy);
}
@Override
public int hashCode() {
if (hash == 0) {
hash = Helper.bytesHashCode(this);
}
return hash;
}
@Override
public boolean equals(Object obj) {
return Helper.bytesEqual(this, (BytesReference) obj);
}
}

View File

@@ -1,28 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.bytes;
import org.elasticsearch.common.lease.Releasable;
/**
* A bytes reference that needs to be released once its usage is done.
*/
public interface ReleasableBytesReference extends BytesReference, Releasable {
}

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.common.bytes;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.ByteArray;
@@ -28,16 +29,12 @@ import org.elasticsearch.common.util.ByteArray;
* An extension to {@link PagedBytesReference} that requires releasing its content. This
* class exists to make it explicit when a bytes reference needs to be released, and when not.
*/
-public class ReleasablePagedBytesReference extends PagedBytesReference implements ReleasableBytesReference {
+public class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {
public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
super(bigarrays, bytearray, length);
}
public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int from, int length) {
super(bigarrays, bytearray, from, length);
}
@Override
public void close() throws ElasticsearchException {
Releasables.close(bytearray);

View File

@@ -19,12 +19,12 @@
package org.elasticsearch.common.io;
-import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
/**
* A bytes stream that requires its bytes to be released once no longer used.
*/
public interface ReleasableBytesStream extends BytesStream {
-ReleasableBytesReference bytes();
+ReleasablePagedBytesReference bytes();
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.common.io.stream;
-import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
import org.elasticsearch.common.io.ReleasableBytesStream;
import org.elasticsearch.common.util.BigArrays;
@@ -42,7 +41,7 @@ public class ReleasableBytesStreamOutput extends BytesStreamOutput implements Re
}
@Override
-public ReleasableBytesReference bytes() {
+public ReleasablePagedBytesReference bytes() {
return new ReleasablePagedBytesReference(bigarrays, bytes, count);
}
}

View File

@@ -23,7 +23,7 @@ import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
@@ -352,7 +352,7 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog
try {
out = new ReleasableBytesStreamOutput(bigArrays);
TranslogStreams.writeTranslogOperation(out, operation);
-ReleasableBytesReference bytes = out.bytes();
+ReleasablePagedBytesReference bytes = out.bytes();
Location location = current.add(bytes);
if (syncOnEachOperation) {
current.sync();

View File

@@ -27,7 +27,7 @@ import org.elasticsearch.*;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.inject.Inject;
@@ -675,7 +675,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
stream.setVersion(version);
stream.writeString(action);
-ReleasableBytesReference bytes;
+ReleasablePagedBytesReference bytes;
ChannelBuffer buffer;
// it might be nice to somehow generalize this optimization, maybe a smart "paged" bytes output
// that create paged channel buffers, but its tricky to know when to do it (where this option is

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.transport.netty;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
-import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.ThrowableObjectOutputStream;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -91,7 +91,7 @@ public class NettyTransportChannel implements TransportChannel {
response.writeTo(stream);
stream.close();
-ReleasableBytesReference bytes = bStream.bytes();
+ReleasablePagedBytesReference bytes = bStream.bytes();
ChannelBuffer buffer = bytes.toChannelBuffer();
NettyHeader.writeHeader(buffer, requestId, status, version);
ChannelFuture future = channel.write(buffer);