diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index 3612220ebc6..46e4b81ba6c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -29,8 +29,6 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
-import javax.annotation.Nullable;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Cell;
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
new file mode 100644
index 00000000000..8a21f9edda1
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java
@@ -0,0 +1,104 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.io;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * Copied from guava source code v15 (LimitedInputStream).
+ * Guava deprecated LimitInputStream in v14 and removed it in v15. Copying this class here
+ * allows HBase to remain compatible with Guava 11 through 15+.
+ */
+public final class LimitInputStream extends FilterInputStream {
+  private long left;
+  private long mark = -1;
+
+  public LimitInputStream(InputStream in, long limit) {
+    super(in);
+    checkNotNull(in);
+    checkArgument(limit >= 0, "limit must be non-negative");
+    left = limit;
+  }
+
+  @Override
+  public int available() throws IOException {
+    return (int) Math.min(in.available(), left);
+  }
+
+  // it's okay to mark even if mark isn't supported, as reset won't work
+  @Override
+  public synchronized void mark(int readLimit) {
+    in.mark(readLimit);
+    mark = left;
+  }
+
+  @Override
+  public int read() throws IOException {
+    if (left == 0) {
+      return -1;
+    }
+
+    int result = in.read();
+    if (result != -1) {
+      --left;
+    }
+    return result;
+  }
+
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    if (left == 0) {
+      return -1;
+    }
+
+    len = (int) Math.min(len, left);
+    int result = in.read(b, off, len);
+    if (result != -1) {
+      left -= result;
+    }
+    return result;
+  }
+
+  @Override
+  public synchronized void reset() throws IOException {
+    if (!in.markSupported()) {
+      throw new IOException("Mark not supported");
+    }
+    if (mark == -1) {
+      throw new IOException("Mark not set");
+    }
+
+    in.reset();
+    left = mark;
+  }
+
+  @Override
+  public long skip(long n) throws IOException {
+    n = Math.min(n, left);
+    long skipped = in.skip(n);
+    left -= skipped;
+    return skipped;
+  }
+}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
index 6cd168ab535..e75b32efc98 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/EncodedDataBlock.java
@@ -33,10 +33,10 @@ import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.compress.Compressor;
 
 import com.google.common.base.Preconditions;
-import com.google.common.io.NullOutputStream;
 
 /**
  * Encapsulates a data block compressed using a particular encoding algorithm.
@@ -161,7 +161,7 @@ public class EncodedDataBlock {
   public static int getCompressedSize(Algorithm algo, Compressor compressor,
       byte[] inputBuffer, int offset, int length) throws IOException {
     DataOutputStream compressedStream = new DataOutputStream(
-        new NullOutputStream());
+        new IOUtils.NullOutputStream());
     if (compressor != null) {
       compressor.reset();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 12c8359809e..de009280b68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -36,8 +36,6 @@ import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.protobuf.generated.HFileProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.google.common.io.NullOutputStream;
-
 /**
  * The {@link HFile} has a fixed trailer which contains offsets to other
  * variable parts of the file. Also includes basic metadata on this file. The
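Not part of the patch: a minimal, self-contained sketch of how the two replacements above behave — the copied `LimitInputStream` capping reads, and Hadoop's `IOUtils.NullOutputStream` serving as a discard sink for size measurement. The demo class name is hypothetical; `LimitInputStream`, `IOUtils.NullOutputStream`, and `DataOutputStream.size()` are the real APIs this patch relies on.

```java
import java.io.ByteArrayInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hbase.io.LimitInputStream;
import org.apache.hadoop.io.IOUtils;

public class GuavaReplacementDemo { // hypothetical demo class, not in the patch
  public static void main(String[] args) throws IOException {
    // LimitInputStream: the wrapped stream holds 16 bytes, but reads stop at 10.
    InputStream limited =
        new LimitInputStream(new ByteArrayInputStream(new byte[16]), 10);
    byte[] buf = new byte[16];
    System.out.println(limited.read(buf)); // 10 -- a read never passes the limit
    System.out.println(limited.read());    // -1 -- the limit acts as EOF

    // IOUtils.NullOutputStream discards bytes while DataOutputStream counts them:
    // the same trick EncodedDataBlock.getCompressedSize uses to measure compressed
    // output without buffering it anywhere.
    DataOutputStream sizer = new DataOutputStream(new IOUtils.NullOutputStream());
    sizer.write(new byte[42]);
    System.out.println(sizer.size()); // 42 bytes "written", none stored
  }
}
```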
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/ExactCounterMetric.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/ExactCounterMetric.java
index 3fe573ec688..715043fb50f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/ExactCounterMetric.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/metrics/ExactCounterMetric.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hbase.metrics;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -33,9 +33,7 @@ import org.apache.hadoop.metrics.util.MetricsBase;
 import org.apache.hadoop.metrics.util.MetricsRegistry;
 import org.cliffc.high_scale_lib.Counter;
 
-import com.google.common.base.Function;
 import com.google.common.collect.Lists;
-import com.google.common.collect.MapMaker;
 
 @Deprecated
 public class ExactCounterMetric extends MetricsBase {
@@ -44,34 +42,25 @@ public class ExactCounterMetric extends MetricsBase {
 
   // only publish stats on the topN items (default to DEFAULT_TOP_N)
   private final int topN;
-  private final Map<String, Counter> counts;
+  private final ConcurrentMap<String, Counter> counts = new ConcurrentHashMap<String, Counter>();
 
   // all access to the 'counts' map should use this lock.
   // take a write lock iff you want to guarantee exclusive access
   // (the map stripes locks internally, so it's already thread safe -
   // this lock is just so you can take a consistent snapshot of data)
   private final ReadWriteLock lock;
-
-
-  /**
-   * Constructor to create a new counter metric
-   * @param nam the name to publish this metric under
-   * @param registry where the metrics object will be registered
-   * @param description metrics description
-   * @param topN how many 'keys' to publish metrics on
-   */
+
+  /**
+   * Constructor to create a new counter metric
+   * @param nam the name to publish this metric under
+   * @param registry where the metrics object will be registered
+   * @param description metrics description
+   * @param topN how many 'keys' to publish metrics on
+   */
   public ExactCounterMetric(final String nam, final MetricsRegistry registry,
       final String description, int topN) {
     super(nam, description);
 
-    this.counts = new MapMaker().makeComputingMap(
-        new Function<String, Counter>() {
-          @Override
-          public Counter apply(String input) {
-            return new Counter();
-          }
-        });
-
     this.lock = new ReentrantReadWriteLock();
     this.topN = topN;
@@ -89,11 +78,28 @@ public class ExactCounterMetric extends MetricsBase {
     this(nam, registry, NO_DESCRIPTION, DEFAULT_TOP_N);
   }
-
+  /**
+   * Gets or lazily creates the counter for {@code type}. putIfAbsent keeps this
+   * safe under the shared read lock: racing callers share one Counter.
+   */
+  private Counter getOrCreateCounter(String type) {
+    Counter cnt = counts.get(type);
+    if (cnt == null) {
+      cnt = new Counter();
+      // putIfAbsent returns the mapping that won a racing insert, if any;
+      // fall back to it so no increment lands on a discarded counter.
+      Counter existing = counts.putIfAbsent(type, cnt);
+      if (existing != null) {
+        cnt = existing;
+      }
+    }
+    return cnt;
+  }
+
   public void update(String type) {
     this.lock.readLock().lock();
     try {
-      this.counts.get(type).increment();
+      getOrCreateCounter(type).increment();
     } finally {
       this.lock.readLock().unlock();
     }
@@ -102,7 +102,7 @@
   public void update(String type, long count) {
     this.lock.readLock().lock();
     try {
-      this.counts.get(type).add(count);
+      getOrCreateCounter(type).add(count);
     } finally {
       this.lock.readLock().unlock();
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 26d8e61ab3d..3819fae2e67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -30,12 +30,12 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hbase.codec.Codec;
+import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer;
 import org.apache.hadoop.hbase.util.Bytes;
 
-import com.google.common.io.LimitInputStream;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.InvalidProtocolBufferException;
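For context on that last hunk: the reason a WAL reader wants a limited stream is to hand the protobuf parser a bounded view of the log, so a corrupt or truncated length prefix can never make a parse run into the next entry. The sketch below shows that pattern only — `readKey` and the `wal` parameter are illustrative, not the class's actual code; `CodedInputStream.readRawVarint32(int, InputStream)` and `Builder.mergeFrom(InputStream)` are real protobuf 2.x APIs.

```java
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.hbase.io.LimitInputStream;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;

import com.google.protobuf.CodedInputStream;

public class WalKeySketch { // hypothetical; illustrates the bounded-parse pattern
  static WALKey readKey(InputStream wal) throws IOException {
    // Each record in the protobuf-based WAL format sits behind a varint
    // length prefix. Read the prefix without buffering past it.
    int firstByte = wal.read();
    if (firstByte == -1) {
      throw new EOFException("EOF while reading WALKey length");
    }
    int size = CodedInputStream.readRawVarint32(firstByte, wal);

    // Cap the parser at exactly 'size' bytes: mergeFrom(InputStream) reads to
    // EOF, and LimitInputStream makes EOF fall on the record boundary.
    WALKey.Builder builder = WALKey.newBuilder();
    builder.mergeFrom(new LimitInputStream(wal, size));
    return builder.build();
  }
}
```

Swapping the import from `com.google.common.io.LimitInputStream` to the copied `org.apache.hadoop.hbase.io.LimitInputStream` is source-compatible here because the copy preserves the Guava class's constructor and stream semantics.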