diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java index 4f3a2f88867..46b85f1085d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java @@ -48,7 +48,7 @@ import java.net.UnknownHostException; /** * Tracks the availability of the catalog tables * .META.. - * + * * This class is "read-only" in that the locations of the catalog tables cannot * be explicitly set. Instead, ZooKeeper is used to learn of the availability * and location of .META.. @@ -65,7 +65,7 @@ public class CatalogTracker { // servers when they needed to know of meta movement but also by // client-side (inside in HTable) so rather than figure meta // locations on fault, the client would instead get notifications out of zk. - // + // // But this original intent is frustrated by the fact that this class has to // read an hbase table, the -ROOT- table, to figure out the .META. region // location which means we depend on an HConnection. HConnection will do @@ -110,13 +110,6 @@ public class CatalogTracker { private boolean instantiatedzkw = false; private Abortable abortable; - /* - * Do not clear this address once set. Its needed when we do - * server shutdown processing -- we need to know who had .META. last. If you - * want to know if the address is good, rely on {@link #metaAvailable} value. - */ - private ServerName metaLocation; - private boolean stopped = false; static final byte [] META_REGION_NAME = @@ -147,7 +140,7 @@ public class CatalogTracker { * @param abortable If fatal exception we'll call abort on this. May be null. * If it is we'll use the Connection associated with the passed * {@link Configuration} as our Abortable. - * @throws IOException + * @throws IOException */ public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf, Abortable abortable) @@ -193,7 +186,7 @@ public class CatalogTracker { * Determines current availability of catalog tables and ensures all further * transitions of either region are tracked. * @throws IOException - * @throws InterruptedException + * @throws InterruptedException */ public void start() throws IOException, InterruptedException { LOG.debug("Starting catalog tracker " + this); @@ -235,7 +228,7 @@ public class CatalogTracker { * not currently available. * @return {@link ServerName} for server hosting .META. or null * if none available - * @throws InterruptedException + * @throws InterruptedException */ public ServerName getMetaLocation() throws InterruptedException { return this.metaRegionTracker.getMetaRegionLocation(); @@ -309,8 +302,6 @@ public class CatalogTracker { LOG.info(".META. still not available, sleeping and retrying." + " Reason: " + e.getMessage()); } - } catch (IOException e) { - LOG.info("Retrying", e); } } } @@ -356,7 +347,7 @@ public class CatalogTracker { } else { throw ioe; } - + } return protocol; } @@ -406,7 +397,7 @@ public class CatalogTracker { } } LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) + - " at address=" + address + "; " + t); + " at address=" + address + ", exception=" + t); return false; } @@ -416,7 +407,7 @@ public class CatalogTracker { * the internal call to {@link #waitForMetaServerConnection(long)}. * @return True if the .META. location is healthy. 
* @throws IOException - * @throws InterruptedException + * @throws InterruptedException */ public boolean verifyMetaRegionLocation(final long timeout) throws InterruptedException, IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index ba14702d3a7..9a41fcb45c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -62,6 +62,11 @@ public class Action implements Comparable { return action.compareTo(((Action) o).getAction()); } + @Override + public int hashCode() { + return this.action.hashCode(); + } + @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 5f27aaa8e72..dfefde62a89 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hbase.client; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; @@ -24,10 +27,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.util.Bytes; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - /** * Performs Append operations on a single row. *

@@ -66,10 +65,22 @@ public class Append extends Mutation { * Create a Append operation for the specified row. *

* At least one column must be appended to. - * @param row row key + * @param row row key; makes a local copy of the passed in array. */ public Append(byte[] row) { - this.row = Arrays.copyOf(row, row.length); + this(row, 0, row.length); + } + + /** Create an Append operation for the specified row. + *

+ * At least one column must be appended to. + * @param rowArray Makes a copy out of this buffer. + * @param rowOffset + * @param rowLength + */ + public Append(final byte [] rowArray, final int rowOffset, final int rowLength) { + checkRow(rowArray, rowOffset, rowLength); + this.row = Bytes.copy(rowArray, rowOffset, rowLength); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index 0db0167a839..5efd45b1148 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -19,6 +19,11 @@ package org.apache.hadoop.hbase.client; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; @@ -26,11 +31,6 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.util.Bytes; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - /** * Used to perform Delete operations on a single row. *

@@ -91,8 +91,27 @@ public class Delete extends Mutation implements Comparable { * @param timestamp maximum version timestamp (only for delete row) */ public Delete(byte [] row, long timestamp) { - this.row = row; - this.ts = timestamp; + this(row, 0, row.length, timestamp); + } + + /** + * Create a Delete operation for the specified row and timestamp.

+ * + * If no further operations are done, this will delete all columns in all + * families of the specified row with a timestamp less than or equal to the + * specified timestamp.

+ * + * This timestamp is ONLY used for a delete row operation. If specifying + * families or columns, you must specify each timestamp individually. + * @param rowArray We make a local copy of this passed in row. + * @param rowOffset + * @param rowLength + * @param ts maximum version timestamp (only for delete row) + */ + public Delete(final byte [] rowArray, final int rowOffset, final int rowLength, long ts) { + checkRow(rowArray, rowOffset, rowLength); + this.row = Bytes.copy(rowArray, rowOffset, rowLength); + this.ts = ts; } /** @@ -121,10 +140,8 @@ public class Delete extends Mutation implements Comparable { } if (Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()) != 0) { - throw new IOException("The row in the recently added KeyValue " - + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " - + Bytes.toStringBinary(this.row)); + throw new WrongRowIOException("The row in " + kv.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } byte [] family = kv.getFamily(); List list = familyMap.get(family); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index 0b6636dcf14..3e3cdce5d3c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -19,14 +19,6 @@ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.filter.Filter; -import org.apache.hadoop.hbase.io.TimeRange; -import org.apache.hadoop.hbase.util.Bytes; - import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -37,6 +29,14 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.util.Bytes; + /** * Used to perform Get operations on a single row. *

@@ -83,6 +83,7 @@ public class Get extends OperationWithAttributes * @param row row key */ public Get(byte [] row) { + Mutation.checkRow(row); this.row = row; } @@ -388,9 +389,17 @@ public class Get extends OperationWithAttributes //Row @Override public int compareTo(Row other) { + // TODO: This is wrong. Can't have two gets the same just because on same row. return Bytes.compareTo(this.getRow(), other.getRow()); } + @Override + public int hashCode() { + // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // matches how equals works currently and gets rid of the findbugs warning. + return Bytes.hashCode(this.getRow()); + } + @Override public boolean equals(Object obj) { if (this == obj) { @@ -400,6 +409,7 @@ public class Get extends OperationWithAttributes return false; } Row other = (Row) obj; + // TODO: This is wrong. Can't have two gets the same just because on same row. return compareTo(other) == 0; } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index a9bae629f21..690c810acf2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1350,7 +1350,7 @@ public class HBaseAdmin implements Abortable, Closeable { throws IOException, InterruptedException { compact(tableNameOrRegionName, null, false); } - + /** * Compact a column family within a table or region. * Asynchronous operation. @@ -1404,7 +1404,7 @@ public class HBaseAdmin implements Abortable, Closeable { throws IOException, InterruptedException { compact(tableNameOrRegionName, null, true); } - + /** * Major compact a column family within a table or region. * Asynchronous operation. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index c5a8608d48a..6f12f51cfb2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.NavigableMap; @@ -28,7 +27,6 @@ import java.util.TreeMap; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.io.TimeRange; @@ -52,17 +50,47 @@ public class Increment extends Mutation implements Comparable { private TimeRange tr = new TimeRange(); /** - * Create a Increment operation for the specified row, using an existing row - * lock. + * Create a Increment operation for the specified row. *

* At least one column must be incremented. - * @param row row key + * @param row row key (we will make a copy of this). */ public Increment(byte [] row) { - if (row == null || row.length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row key is invalid"); + this(row, 0, row.length); + } + + /** + * Create an Increment operation for the specified row. + *

+ * At least one column must be incremented. + * @param row row key (we will make a copy of this). + */ + public Increment(final byte [] row, final int offset, final int length) { + checkRow(row, offset, length); + this.row = Bytes.copy(row, offset, length); + } + + /** + * Add the specified KeyValue to this operation. + * @param cell individual Cell + * @return this + * @throws java.io.IOException e + */ + @SuppressWarnings("unchecked") + public Increment add(Cell cell) throws IOException{ + KeyValue kv = KeyValueUtil.ensureKeyValue(cell); + byte [] family = kv.getFamily(); + List list = getCellList(family); + //Checking that the row of the kv is the same as the put + int res = Bytes.compareTo(this.row, 0, row.length, + kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); + if (res != 0) { + throw new WrongRowIOException("The row in " + kv.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } - this.row = Arrays.copyOf(row, row.length); + ((List)list).add(kv); + familyMap.put(family, list); + return this; } /** @@ -204,11 +232,20 @@ public class Increment extends Mutation implements Comparable { @Override public int compareTo(Row i) { + // TODO: This is wrong. Can't have two the same just because on same row. return Bytes.compareTo(this.getRow(), i.getRow()); } + @Override + public int hashCode() { + // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // matches how equals works currently and gets rid of the findbugs warning. + return Bytes.hashCode(this.getRow()); + } + @Override public boolean equals(Object obj) { + // TODO: This is wrong. Can't have two the same just because on same row. if (this == obj) { return true; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index 4df496a83f0..2fcb652a1a7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -250,6 +250,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C } /** + * Number of KeyValues carried by this Mutation. * @return the total number of KeyValues */ public int size() { @@ -299,4 +300,36 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C heapsize += getAttributeSize(); return heapsize; } -} + + /** + * @param row Row to check + * @throws IllegalArgumentException Thrown if row is empty or null or + * > {@link HConstants#MAX_ROW_LENGTH} + * @return row + */ + static byte [] checkRow(final byte [] row) { + return checkRow(row, 0, row == null? 
0: row.length); + } + + /** + * @param row Row to check + * @param offset + * @param length + * @throws IllegalArgumentException Thrown if row is empty or null or + * > {@link HConstants#MAX_ROW_LENGTH} + * @return row + */ + static byte [] checkRow(final byte [] row, final int offset, final int length) { + if (row == null) { + throw new IllegalArgumentException("Row buffer is null"); + } + if (length == 0) { + throw new IllegalArgumentException("Row length is 0"); + } + if (length > HConstants.MAX_ROW_LENGTH) { + throw new IllegalArgumentException("Row length " + length + " is > " + + HConstants.MAX_ROW_LENGTH); + } + return row; + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index 7819723b2d3..97c55ecd25f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -57,14 +57,23 @@ public class Put extends Mutation implements HeapSize, Comparable { /** * Create a Put operation for the specified row, using a given timestamp. * - * @param row row key + * @param row row key; we make a copy of what we are passed to keep local. * @param ts timestamp */ public Put(byte[] row, long ts) { - if(row == null || row.length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row key is invalid"); - } - this.row = Arrays.copyOf(row, row.length); + this(row, 0, row.length, ts); + } + + /** + * We make a copy of the passed in row key to keep local. + * @param rowArray + * @param rowOffset + * @param rowLength + * @param ts + */ + public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { + checkRow(rowArray, rowOffset, rowLength); + this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; } @@ -125,11 +134,9 @@ public class Put extends Mutation implements HeapSize, Comparable { //Checking that the row of the kv is the same as the put int res = Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(), kv.getRowOffset(), kv.getRowLength()); - if(res != 0) { - throw new IOException("The row in the recently added KeyValue " + - Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(), - kv.getRowLength()) + " doesn't match the original one " + - Bytes.toStringBinary(this.row)); + if (res != 0) { + throw new WrongRowIOException("The row in " + kv.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } ((List)list).add(kv); familyMap.put(family, list); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java index ae5895a1cfb..e612e038272 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowMutations.java @@ -17,23 +17,26 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.Bytes; - import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.HConstants; +import 
org.apache.hadoop.hbase.util.Bytes; + /** * Performs multiple mutations atomically on a single row. * Currently {@link Put} and {@link Delete} are supported. * * The mutations are performed in the order in which they * were added. + * + *

We compare and equate mutations based off their row so be careful putting RowMutations + * into Sets or using them as keys in Maps. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -88,6 +91,16 @@ public class RowMutations implements Row { return Bytes.compareTo(this.getRow(), i.getRow()); } + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + if (obj instanceof RowMutations) { + RowMutations other = (RowMutations)obj; + return compareTo(other) == 0; + } + return false; + } + @Override public byte[] getRow() { return row; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java new file mode 100644 index 00000000000..a44dc8aaee0 --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/WrongRowIOException.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import org.apache.hadoop.hbase.exceptions.HBaseIOException; + +public class WrongRowIOException extends HBaseIOException { + private static final long serialVersionUID = -5849522209440123059L; + + public WrongRowIOException(final String msg) { + super(msg); + } +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java index 3d759e8bd56..45949648305 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/AccessDeniedException.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.exceptions; -import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; /** * Exception thrown by access-related methods. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java index a66591ea1e7..5e5409a55c6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CoprocessorException.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; /** * Thrown if a coprocessor encounters any exception. 
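Reviewer note, not part of the patch: the RowMutations hunk above adds an equals() that, together with the existing compareTo(), equates two RowMutations purely by row, which is why the new class comment warns about putting them into Sets or using them as Map keys. The sketch below illustrates the collision; it assumes the client API present in this tree (RowMutations(byte[]), add(Put)/add(Delete), Put.add(family, qualifier, value)) and is only an illustration, not code from the change.

import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.util.Bytes;

public class RowMutationsEqualityExample {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");

    // Two RowMutations for the same row but carrying different edits.
    RowMutations a = new RowMutations(row);
    Put put = new Put(row);
    put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    a.add(put);

    RowMutations b = new RowMutations(row);
    b.add(new Delete(row));

    // With the equals() added above, equality is decided by row alone.
    System.out.println("a.equals(b) = " + a.equals(b));   // true

    // A sorted set uses compareTo(), so the second instance is dropped.
    Set<RowMutations> set = new TreeSet<RowMutations>();
    set.add(a);
    set.add(b);
    System.out.println("set size = " + set.size());        // 1
  }
}

Note also that the hunk adds equals() without a matching hashCode(), so a HashSet will still treat a and b as distinct while a TreeSet will not.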
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java index 61288ea86b3..3b92b3579d3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/CorruptHFileException.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; /** * This exception is thrown when attempts to read an HFile fail due to corruption or truncation diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java index a539a6650e0..8973233f111 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/DoNotRetryIOException.java @@ -56,4 +56,4 @@ public class DoNotRetryIOException extends HBaseIOException { public DoNotRetryIOException(Throwable cause) { super(cause); } -} +} \ No newline at end of file diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java index 1d1cece986c..0ba7650cd5b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/LeaseException.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException; /** * Reports a problem with a lease diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java index b262bd1aa13..12008e2ddb9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java @@ -23,6 +23,7 @@ import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos; /** @@ -47,6 +48,11 @@ public class NullComparator extends ByteArrayComparable { return obj == null; } + @Override + public int hashCode() { + return 0; + } + @Override public int compareTo(byte[] value, int offset, int length) { throw new UnsupportedOperationException(); @@ -69,9 +75,9 @@ public class NullComparator extends ByteArrayComparable { */ public static NullComparator parseFrom(final byte [] pbBytes) throws DeserializationException { - ComparatorProtos.NullComparator proto; try { - proto = ComparatorProtos.NullComparator.parseFrom(pbBytes); + @SuppressWarnings("unused") + ComparatorProtos.NullComparator proto = ComparatorProtos.NullComparator.parseFrom(pbBytes); } catch (InvalidProtocolBufferException e) { throw new DeserializationException(e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java index 26ab35d7736..c223fa51036 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -46,7 +46,7 @@ public enum AuthMethod { private static final int FIRST_CODE = values()[0].code; /** Return the object represented by the code. */ - private static AuthMethod valueOf(byte code) { + public static AuthMethod valueOf(byte code) { final int i = (code & 0xff) - FIRST_CODE; return i < 0 || i >= values().length ? null : values()[i]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java index ea0b255b2fc..f147299262f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java @@ -1587,7 +1587,7 @@ public class ZKUtil { try { getReplicationZnodesDump(zkw, sb); } catch (KeeperException ke) { - LOG.warn("Couldn't get the replication znode dump." + ke.getStackTrace()); + LOG.warn("Couldn't get the replication znode dump", ke); } sb.append("\nQuorum Server Statistics:"); String[] servers = zkw.getQuorum().split(","); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java index 8c5abbabe3e..ef8f0222a30 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -29,9 +29,10 @@ import org.junit.experimental.categories.Category; @Category(SmallTests.class) public class TestAttributes { + private static final byte [] ROW = new byte [] {'r'}; @Test public void testPutAttributes() { - Put put = new Put(new byte [] {}); + Put put = new Put(ROW); Assert.assertTrue(put.getAttributesMap().isEmpty()); Assert.assertNull(put.getAttribute("absent")); @@ -79,7 +80,7 @@ public class TestAttributes { @Test public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {}); + Delete del = new Delete(new byte [] {'r'}); Assert.assertTrue(del.getAttributesMap().isEmpty()); Assert.assertNull(del.getAttribute("absent")); @@ -126,7 +127,7 @@ public class TestAttributes { @Test public void testGetId() { - Get get = new Get(null); + Get get = new Get(ROW); Assert.assertNull("Make sure id is null if unset", get.toMap().get("id")); get.setId("myId"); Assert.assertEquals("myId", get.toMap().get("id")); @@ -134,7 +135,7 @@ public class TestAttributes { @Test public void testAppendId() { - Append append = new Append(Bytes.toBytes("testRow")); + Append append = new Append(ROW); Assert.assertNull("Make sure id is null if unset", append.toMap().get("id")); append.setId("myId"); Assert.assertEquals("myId", append.toMap().get("id")); @@ -142,7 +143,7 @@ public class TestAttributes { @Test public void testDeleteId() { - Delete delete = new Delete(new byte [] {}); + Delete delete = new Delete(ROW); Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id")); delete.setId("myId"); Assert.assertEquals("myId", delete.toMap().get("id")); @@ -150,7 +151,7 @@ public class TestAttributes { @Test public void testPutId() { - Put put = new Put(new byte [] {}); + Put put = new Put(ROW); Assert.assertNull("Make sure id is null if unset", put.toMap().get("id")); put.setId("myId"); 
Assert.assertEquals("myId", put.toMap().get("id")); @@ -163,6 +164,4 @@ public class TestAttributes { scan.setId("myId"); Assert.assertEquals("myId", scan.toMap().get("id")); } - -} - +} \ No newline at end of file diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 3d01d1bea16..419d41dfe05 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -34,6 +34,7 @@ import org.junit.experimental.categories.Category; // TODO: cover more test cases @Category(SmallTests.class) public class TestGet { + private static final byte [] ROW = new byte [] {'r'}; @Test public void testAttributesSerialization() throws IOException { Get get = new Get(Bytes.toBytes("row")); @@ -53,7 +54,7 @@ public class TestGet { @Test public void testGetAttributes() { - Get get = new Get(null); + Get get = new Get(ROW); Assert.assertTrue(get.getAttributesMap().isEmpty()); Assert.assertNull(get.getAttribute("absent")); @@ -100,11 +101,10 @@ public class TestGet { @Test public void testNullQualifier() { - Get get = new Get(null); + Get get = new Get(ROW); byte[] family = Bytes.toBytes("family"); get.addColumn(family, null); Set qualifiers = get.getFamilyMap().get(family); Assert.assertEquals(1, qualifiers.size()); } -} - +} \ No newline at end of file diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java index 2e9e4f2818a..2993dd5ea93 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java @@ -34,6 +34,9 @@ import com.google.common.primitives.Longs; * regionname, from row. See KeyValue for how it has a special comparator to do .META. cells * and yet another for -ROOT-. */ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="UNKNOWN", + justification="Findbugs doesn't like the way we are negating the result of a compare in below") @InterfaceAudience.Private @InterfaceStability.Evolving public class CellComparator implements Comparator, Serializable{ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java index 4dc2dae08fa..a30c0e6ecdc 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java @@ -872,9 +872,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable { /** * Clones a KeyValue. This creates a copy, re-allocating the buffer. * @return Fully copied clone of this KeyValue + * @throws CloneNotSupportedException */ @Override - public KeyValue clone() { + public KeyValue clone() throws CloneNotSupportedException { + super.clone(); byte [] b = new byte[this.length]; System.arraycopy(this.bytes, this.offset, b, 0, this.length); KeyValue ret = new KeyValue(b, 0, b.length); @@ -885,15 +887,6 @@ public class KeyValue implements Cell, HeapSize, Cloneable { return ret; } - /** - * Creates a deep copy of this KeyValue, re-allocating the buffer. - * Same function as {@link #clone()}. Added for clarity vs shallowCopy() - * @return Deep copy of this KeyValue - */ - public KeyValue deepCopy() { - return clone(); - } - /** * Creates a shallow copy of this KeyValue, reusing the data byte buffer. 
* http://en.wikipedia.org/wiki/Object_copy diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java index 3743069251c..0edb781e108 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseDecoder.java @@ -22,12 +22,12 @@ import java.io.InputStream; import org.apache.hadoop.hbase.Cell; -abstract class BaseDecoder implements Codec.Decoder { - final InputStream in; +public abstract class BaseDecoder implements Codec.Decoder { + protected final InputStream in; private boolean hasNext = true; private Cell current = null; - BaseDecoder(final InputStream in) { + public BaseDecoder(final InputStream in) { this.in = in; } @@ -50,7 +50,7 @@ abstract class BaseDecoder implements Codec.Decoder { * @return extract a Cell * @throws IOException */ - abstract Cell parseCell() throws IOException; + protected abstract Cell parseCell() throws IOException; @Override public Cell current() { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java index c7a4aaba8e0..941fb0ee99d 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/BaseEncoder.java @@ -22,7 +22,7 @@ import java.io.OutputStream; import org.apache.hadoop.hbase.Cell; -abstract class BaseEncoder implements Codec.Encoder { +public abstract class BaseEncoder implements Codec.Encoder { protected final OutputStream out; // This encoder is 'done' once flush has been called. protected boolean flushed = false; @@ -34,7 +34,7 @@ abstract class BaseEncoder implements Codec.Encoder { @Override public abstract void write(Cell cell) throws IOException; - void checkFlushed() throws CodecException { + protected void checkFlushed() throws CodecException { if (this.flushed) throw new CodecException("Flushed; done"); } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java index e3b79726572..2acf9deedad 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/CellCodec.java @@ -77,7 +77,7 @@ public class CellCodec implements Codec { super(in); } - Cell parseCell() throws IOException { + protected Cell parseCell() throws IOException { byte [] row = readByteArray(this.in); byte [] family = readByteArray(in); byte [] qualifier = readByteArray(in); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java index 33a21b85a37..99db83091bf 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/KeyValueCodec.java @@ -66,7 +66,7 @@ public class KeyValueCodec implements Codec { super(in); } - Cell parseCell() throws IOException { + protected Cell parseCell() throws IOException { return KeyValue.iscreate(in); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java similarity index 97% rename from 
hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java rename to hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java index fb0094ce087..ec48bdba56d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ByteBufferOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferOutputStream.java @@ -17,7 +17,7 @@ * limitations under the License. */ -package org.apache.hadoop.hbase.util; +package org.apache.hadoop.hbase.io; import java.io.IOException; import java.io.OutputStream; @@ -27,6 +27,7 @@ import java.nio.channels.WritableByteChannel; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.Bytes; /** * Not thread safe! diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java index 039187de2b3..4c29120b3eb 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DiffKeyDeltaEncoder.java @@ -209,8 +209,9 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder { state.familyNameWithSize = new byte[(state.familyLength & 0xff) + KeyValue.FAMILY_LENGTH_SIZE]; state.familyNameWithSize[0] = state.familyLength; - source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE, + int read = source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE, state.familyLength); + assert read == state.familyLength; } // read flag diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java index 925801a2321..590baf59c45 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/PrefixKeyDeltaEncoder.java @@ -16,14 +16,10 @@ */ package org.apache.hadoop.hbase.io.encoding; -import java.io.ByteArrayOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; -import java.io.FilterOutputStream; import java.io.IOException; -import java.lang.reflect.Field; import java.nio.ByteBuffer; -import java.util.Arrays; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hbase.KeyValue; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index e804ece1e42..30fa08bb604 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -1672,6 +1672,21 @@ public class Bytes { return result; } + /** + * Copy the byte array given in parameter and return an instance + * of a new byte array with the same length and the same content. + * @param bytes the byte array to copy from + * @return a copy of the given designated byte array + * @param offset + * @param length + */ + public static byte [] copy(byte [] bytes, final int offset, final int length) { + if (bytes == null) return null; + byte [] result = new byte[length]; + System.arraycopy(bytes, offset, result, 0, length); + return result; + } + /** * Search sorted array "a" for byte "key". 
I can't remember if I wrote this or copied it from * somewhere. (mcorgan) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java index e3d8cb2bf91..e3cf8378a0b 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/test/RedundantKVGenerator.java @@ -34,6 +34,9 @@ import com.google.common.primitives.Bytes; * Generate list of key values which are very useful to test data block encoding * and compression. */ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="RV_ABSOLUTE_VALUE_OF_RANDOM_INT", + justification="Should probably fix") public class RedundantKVGenerator { // row settings static byte[] DEFAULT_COMMON_PREFIX = new byte[0]; diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java index d3eaf7dd552..1af5f6248cc 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java @@ -183,7 +183,6 @@ public class DemoClient { client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes); // non-utf8 is now allowed in row names because HBase stores values as binary - ByteBuffer bf = ByteBuffer.wrap(invalid); mutations = new ArrayList(); mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal)); diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index b4f8d56a49f..329b6673e01 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -137,6 +137,13 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { .compareTo(impl.regionWrapper.getRegionName()); } + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + if (!(obj instanceof MetricsRegionSourceImpl)) return false; + return compareTo((MetricsRegionSourceImpl)obj) == 0; + } + void snapshot(MetricsRecordBuilder mrb, boolean ignored) { if (closed) return; diff --git a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 741449c25ac..d84722dd2fe 100644 --- a/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop1-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -36,6 +36,9 @@ import java.util.concurrent.TimeUnit; * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used * are package private. 
*/ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="LI_LAZY_INIT_STATIC", + justification="Yeah, its weird but its what we want") public class JmxCacheBuster { private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class); private static Object lock = new Object(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index 6877362dbcf..9f2d7494698 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.regionserver.MetricsRegionSourceImpl; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.impl.JmxCacheBuster; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -138,6 +139,13 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { .compareTo(impl.regionWrapper.getRegionName()); } + @Override + public boolean equals(Object obj) { + if (obj == this) return true; + if (!(obj instanceof MetricsRegionSourceImpl)) return false; + return compareTo((MetricsRegionSourceImpl)obj) == 0; + } + void snapshot(MetricsRecordBuilder mrb, boolean ignored) { if (closed) return; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 9fa9fa1096d..e51f6d5079a 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -35,6 +35,9 @@ import java.util.concurrent.TimeUnit; * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used * are package private. 
*/ +@edu.umd.cs.findbugs.annotations.SuppressWarnings( + value="LI_LAZY_INIT_STATIC", + justification="Yeah, its weird but its what we want") public class JmxCacheBuster { private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class); private static Object lock = new Object(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java index 970d5236c0a..61aa70e1118 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/HBaseServer.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.IpcProtocol; import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException; import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException; +import org.apache.hadoop.hbase.io.ByteBufferOutputStream; import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler; import org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader; @@ -86,7 +87,6 @@ import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandle import org.apache.hadoop.hbase.security.SaslStatus; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.User; -import org.apache.hadoop.hbase.util.ByteBufferOutputStream; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Writable; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index 99639dde9f7..dd2c5eda5ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -100,9 +100,9 @@ public class ImportTsv extends Configured implements Tool { private int timestampKeyColumnIndex = DEFAULT_TIMESTAMP_COLUMN_INDEX; - public static String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY"; + public static final String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY"; - public static String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY"; + public static final String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY"; /** * @param columnsSpecification the list of columns to parser out, comma separated. 
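Reviewer note, not part of the patch: several hunks above (CellComparator, RedundantKVGenerator and both JmxCacheBuster copies) silence Findbugs with its annotation-based SuppressWarnings rather than an XML exclude file. A minimal sketch of that pattern follows; it assumes the findbugs-annotations jar is on the compile classpath and reuses the RV_ABSOLUTE_VALUE_OF_RANDOM_INT pattern code that the patch itself suppresses, purely as an illustration.

import java.util.Random;

public class FindbugsSuppressionExample {

  // The annotation is fully qualified, as in the hunks above, to avoid clashing
  // with java.lang.SuppressWarnings; value names the Findbugs bug pattern and
  // justification records why the warning is being waved off.
  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
      value = "RV_ABSOLUTE_VALUE_OF_RANDOM_INT",
      justification = "Math.abs(Integer.MIN_VALUE) is negative; rare enough to tolerate here")
  int randomBucket(Random rng, int buckets) {
    // Findbugs flags this because Math.abs(rng.nextInt()) can still be negative
    // when nextInt() returns Integer.MIN_VALUE.
    return Math.abs(rng.nextInt()) % buckets;
  }
}

The same annotation can also sit on a class, which is what the JmxCacheBuster and RedundantKVGenerator hunks do.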
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java index e746f95c44d..66f49d43440 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java @@ -41,7 +41,11 @@ public class KeyValueSortReducer extends Reducer map = new TreeSet(KeyValue.COMPARATOR); for (KeyValue kv: kvs) { - map.add(kv.clone()); + try { + map.add(kv.clone()); + } catch (CloneNotSupportedException e) { + throw new java.io.IOException(e); + } } context.setStatus("Read " + map.getClass()); int index = 0; @@ -50,4 +54,4 @@ public class KeyValueSortReducer extends Reducer 0 && index % 100 == 0) context.setStatus("Wrote " + index); } } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 19da07f3fd5..12fcb813278 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -105,15 +105,16 @@ public class DeleteTableHandler extends TableEventHandler { LOG.error("Couldn't delete " + tempTableDir); } - LOG.debug("Table '" + tableName + "' archived!"); + LOG.debug("Table '" + Bytes.toString(tableName) + "' archived!"); } finally { + String tableNameStr = Bytes.toString(tableName); // 6. Update table descriptor cache - LOG.debug("Removing '" + tableName + "' descriptor."); + LOG.debug("Removing '" + tableNameStr + "' descriptor."); this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName)); // 7. If entry for this table in zk, and up in AssignmentManager, remove it. 
- LOG.debug("Marking '" + tableName + "' as deleted."); - am.getZKTable().setDeletedTable(Bytes.toString(tableName)); + LOG.debug("Marking '" + tableNameStr + "' as deleted."); + am.getZKTable().setDeletedTable(tableNameStr); } if (cpHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 8baf053fdb9..f42d572b5a4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.EOFException; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; import java.io.UnsupportedEncodingException; @@ -67,24 +66,17 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hbase.CompoundConfiguration; -import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException; -import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CompoundConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException; -import org.apache.hadoop.hbase.exceptions.NotServingRegionException; -import org.apache.hadoop.hbase.exceptions.RegionTooBusyException; -import org.apache.hadoop.hbase.exceptions.UnknownScannerException; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.client.Append; import org.apache.hadoop.hbase.client.Delete; @@ -98,6 +90,13 @@ import org.apache.hadoop.hbase.client.Row; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare; +import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException; +import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; +import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException; +import org.apache.hadoop.hbase.exceptions.NotServingRegionException; +import org.apache.hadoop.hbase.exceptions.RegionTooBusyException; +import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; +import org.apache.hadoop.hbase.exceptions.UnknownScannerException; import org.apache.hadoop.hbase.exceptions.WrongRegionException; import org.apache.hadoop.hbase.filter.ByteArrayComparable; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; @@ -110,7 +109,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.hbase.ipc.RpcCallContext; -import org.apache.hadoop.hbase.exceptions.UnknownProtocolException; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import 
org.apache.hadoop.hbase.monitoring.TaskMonitor; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; @@ -118,14 +116,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.regionserver.wal.HLogFactory; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.HLogUtil; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; -import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.ClassSize; @@ -1670,6 +1666,10 @@ public class HRegion implements HeapSize { // , Writable{ } } + /** + * Row needed by below method. + */ + private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly"); /** * This is used only by unit tests. Not required to be a public API. * @param familyMap map of family to edits for the given family. @@ -1678,7 +1678,7 @@ public class HRegion implements HeapSize { // , Writable{ */ void delete(NavigableMap> familyMap, UUID clusterId, boolean writeToWAL) throws IOException { - Delete delete = new Delete(HConstants.EMPTY_BYTE_ARRAY); + Delete delete = new Delete(FOR_UNIT_TESTS_ONLY); delete.setFamilyMap(familyMap); delete.setClusterId(clusterId); delete.setWriteToWAL(writeToWAL); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index 01b8b23354e..1a39431d481 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -51,7 +51,6 @@ import org.apache.thrift.TException; * thrift server dies or is shut down before everything in the queue is drained. 
* */ - public class IncrementCoalescer implements IncrementCoalescerMBean { /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java index 02d98af35bb..32b293520e7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java @@ -261,8 +261,9 @@ public class TestZooKeeper { HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME); // dummy, just to open the connection - localMeta.exists(new Get(HConstants.LAST_ROW)); - ipMeta.exists(new Get(HConstants.LAST_ROW)); + final byte [] row = new byte [] {'r'}; + localMeta.exists(new Get(row)); + ipMeta.exists(new Get(row)); // make sure they aren't the same ZooKeeperWatcher z1 = @@ -359,8 +360,26 @@ public class TestZooKeeper { "testMasterAddressManagerFromZK", null); // Save the previous ACL - Stat s = new Stat(); - List oldACL = zk.getACL("/", s); + Stat s = null; + List oldACL = null; + while (true) { + try { + s = new Stat(); + oldACL = zk.getACL("/", s); + break; + } catch (KeeperException e) { + switch (e.code()) { + case CONNECTIONLOSS: + case SESSIONEXPIRED: + case OPERATIONTIMEOUT: + LOG.warn("Possibly transient ZooKeeper exception", e); + Threads.sleep(100); + break; + default: + throw e; + } + } + } // I set this acl after the attempted creation of the cluster home node. // Add retries in case of retryable zk exceptions. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java index 6571875155c..64c92543f44 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java @@ -33,6 +33,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.ScannerCallable; +import org.apache.hadoop.hbase.ipc.HBaseClient; +import org.apache.hadoop.hbase.ipc.HBaseServer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -40,6 +43,8 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.log4j.Level; /** * Test {@link MetaReader}, {@link MetaEditor}. 
@@ -63,10 +68,12 @@ public class TestMetaReaderEditor { public boolean isAborted() { return abort.get(); } - }; @BeforeClass public static void beforeClass() throws Exception { + ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL); + ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL); UTIL.startMiniCluster(3); Configuration c = new Configuration(UTIL.getConfiguration()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index 2e3fd20e742..65eeed8663e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4306,6 +4306,8 @@ public class TestFromClientSide { fail("Should have thrown IllegalArgumentException"); } catch (IllegalArgumentException iax) { // success + } catch (NullPointerException npe) { + // success } // try null family try { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java index 7ffd9de55fa..c235309b730 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.client; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.Arrays; @@ -388,20 +389,19 @@ public class TestFromClientSide3 { table.put(put); table.flushCommits(); - //Try getting the row with an empty row key and make sure the other base cases work as well - Result res = table.get(new Get(new byte[0])); - assertTrue(res.isEmpty() == true); + //Try getting the row with an empty row key + Result res = null; + try { + res = table.get(new Get(new byte[0])); + fail(); + } catch (IllegalArgumentException e) { + // Expected. 
+ } + assertTrue(res == null); res = table.get(new Get(Bytes.toBytes("r1-not-exist"))); assertTrue(res.isEmpty() == true); res = table.get(new Get(ROW_BYTES)); assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES)); - - //Now actually put in a row with an empty row key - put = new Put(new byte[0]); - put.add(FAMILY, COL_QUAL, VAL_BYTES); - table.put(put); - table.flushCommits(); - res = table.get(new Get(new byte[0])); - assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES)); + table.close(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java index 461de4c8700..c2deb56c37a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexer.java @@ -19,11 +19,12 @@ */ package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + import java.util.ArrayList; import java.util.List; -import junit.framework.Assert; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -83,10 +84,12 @@ public class TestHTableMultiplexer { // SinglePut case for (int i = 0; i < NUM_REGIONS; i++) { - Put put = new Put(startRows[i]); + byte [] row = startRows[i]; + if (row == null || row.length <= 0) continue; + Put put = new Put(row); put.add(FAMILY, QUALIFIER, VALUE1); success = multiplexer.put(TABLE, put); - Assert.assertTrue(success); + assertTrue(success); // ensure the buffer has been flushed verifyAllBufferedPutsHaveFlushed(status); @@ -99,32 +102,35 @@ public class TestHTableMultiplexer { do { r = ht.get(get); } while (r == null || r.getValue(FAMILY, QUALIFIER) == null); - Assert.assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER))); + assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER))); } // MultiPut case List multiput = new ArrayList(); for (int i = 0; i < NUM_REGIONS; i++) { - Put put = new Put(endRows[i]); + byte [] row = endRows[i]; + if (row == null || row.length <= 0) continue; + Put put = new Put(row); put.add(FAMILY, QUALIFIER, VALUE2); multiput.add(put); } failedPuts = multiplexer.put(TABLE, multiput); - Assert.assertTrue(failedPuts == null); + assertTrue(failedPuts == null); // ensure the buffer has been flushed verifyAllBufferedPutsHaveFlushed(status); // verify that the Get returns the correct result for (int i = 0; i < NUM_REGIONS; i++) { - Get get = new Get(endRows[i]); + byte [] row = endRows[i]; + if (row == null || row.length <= 0) continue; + Get get = new Get(row); get.addColumn(FAMILY, QUALIFIER); Result r; do { r = ht.get(get); } while (r == null || r.getValue(FAMILY, QUALIFIER) == null); - Assert.assertEquals(0, - Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER))); + assertEquals(0, Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER))); } } @@ -141,7 +147,7 @@ public class TestHTableMultiplexer { } } while (status.getTotalBufferedCounter() != 0 && tries != retries); - Assert.assertEquals("There are still some buffered puts left in the queue", + assertEquals("There are still some buffered puts left in the queue", 0, status.getTotalBufferedCounter()); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java new file mode 100644 index 00000000000..6d45aff7073 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestPutDeleteEtcCellIteration.java @@ -0,0 +1,149 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +import java.util.Arrays; +import java.util.ConcurrentModificationException; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellScanner; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +/** + * Test that I can Iterate Client Actions that hold Cells (Get does not have Cells). + */ +@Category(SmallTests.class) +public class TestPutDeleteEtcCellIteration { + private static final byte [] ROW = new byte [] {'r'}; + private static final long TIMESTAMP = System.currentTimeMillis(); + private static final int COUNT = 10; + + @Test + public void testPutIteration() { + Put p = new Put(ROW); + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + p.add(bytes, bytes, TIMESTAMP, bytes); + } + int index = 0; + for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] bytes = Bytes.toBytes(index++); + cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)); + } + assertEquals(COUNT, index); + } + + @Test (expected = ConcurrentModificationException.class) + public void testPutConcurrentModificationOnIteration() { + Put p = new Put(ROW); + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + p.add(bytes, bytes, TIMESTAMP, bytes); + } + int index = 0; + int trigger = 3; + for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] bytes = Bytes.toBytes(index++); + // When we hit the trigger, try inserting a new KV; should trigger exception + if (index == trigger) p.add(bytes, bytes, TIMESTAMP, bytes); + cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)); + } + assertEquals(COUNT, index); + } + + @Test + public void testDeleteIteration() { + Delete d = new Delete(ROW); + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + d.deleteColumn(bytes, bytes, TIMESTAMP); + } + int index = 0; + for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] bytes = Bytes.toBytes(index++); + cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, KeyValue.Type.DeleteColumn)); + } +
assertEquals(COUNT, index); + } + + @Test + public void testAppendIteration() { + Append a = new Append(ROW); + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + a.add(bytes, bytes, bytes); + } + int index = 0; + for (CellScanner cellScanner = a.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] bytes = Bytes.toBytes(index++); + KeyValue kv = (KeyValue)cell; + assertTrue(Bytes.equals(kv.getFamily(), bytes)); + assertTrue(Bytes.equals(kv.getValue(), bytes)); + } + assertEquals(COUNT, index); + } + + @Test + public void testIncrementIteration() { + Increment increment = new Increment(ROW); + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + increment.addColumn(bytes, bytes, i); + } + int index = 0; + for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + int value = index; + byte [] bytes = Bytes.toBytes(index++); + KeyValue kv = (KeyValue)cell; + assertTrue(Bytes.equals(kv.getFamily(), bytes)); + long a = Bytes.toLong(kv.getValue()); + assertEquals(value, a); + } + assertEquals(COUNT, index); + } + + @Test + public void testResultIteration() { + Cell [] cells = new Cell[COUNT]; + for (int i = 0; i < COUNT; i++) { + byte [] bytes = Bytes.toBytes(i); + cells[i] = new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes); + } + Result r = new Result(Arrays.asList(cells)); + int index = 0; + for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) { + Cell cell = cellScanner.current(); + byte [] bytes = Bytes.toBytes(index++); + cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes)); + } + assertEquals(COUNT, index); + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index 9dde497dbce..fc4495df7ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -318,7 +318,12 @@ public class TestCoprocessorInterface extends HBaseTestCase { // now have all Environments fail for (int i = 0; i < regions.length; i++) { try { - Get g = new Get(regions[i].getStartKey()); + byte [] r = regions[i].getStartKey(); + if (r == null || r.length <= 0) { + // It's the start row. Can't ask for null. Ask for minimal key instead. 
+ r = new byte [] {0}; + } + Get g = new Get(r); regions[i].get(g); fail(); } catch (org.apache.hadoop.hbase.exceptions.DoNotRetryIOException xc) { @@ -342,7 +347,8 @@ public class TestCoprocessorInterface extends HBaseTestCase { findCoprocessor(CoprocessorII.class.getName()); // new map and object created, hence the reference is different // hence the old entry was indeed removed by the GC and new one has been created - assertFalse(((CoprocessorII)c2).getSharedData().get("test2") == o2); + Object o3 = ((CoprocessorII)c2).getSharedData().get("test2"); + assertFalse(o3 == o2); } public void testCoprocessorInterface() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 9e94004ec8e..abdaac74412 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.io; import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.RuntimeMXBean; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.TreeMap; import java.util.Map; +import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.CopyOnWriteArrayList; @@ -44,14 +46,11 @@ import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CachedBlock; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.MemStore; import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.regionserver.MemStore; import org.apache.hadoop.hbase.util.ClassSize; -import org.junit.experimental.categories.Category; import org.junit.BeforeClass; -import java.lang.management.ManagementFactory; -import java.lang.management.RuntimeMXBean; +import org.junit.experimental.categories.Category; /** * Testing the sizing that HeapSize offers and compares to the size given by @@ -252,10 +251,10 @@ public class TestHeapSize extends TestCase { cl = Put.class; expected = ClassSize.estimateBase(cl, false); //The actual TreeMap is not included in the above calculation - expected += ClassSize.TREEMAP; - Put put = new Put(Bytes.toBytes("")); + expected += ClassSize.align(ClassSize.TREEMAP + ClassSize.REFERENCE); + Put put = new Put(new byte [] {'0'}); actual = put.heapSize(); - if(expected != actual) { + if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index 86fd34d4672..b26aa9945da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -477,7 +477,10 @@ public class TestEndToEndSplitTransaction { HTable table = new HTable(conf, hri.getTableName()); try { - Get get = new Get(hri.getStartKey()); + byte [] row = hri.getStartKey(); + // Check for null/empty row. If we find one, use a key that is likely to be in first region. 
+ if (row == null || row.length <= 0) row = new byte [] {'0'}; + Get get = new Get(row); while (System.currentTimeMillis() - start < timeout) { try { table.get(get); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java index dfc4c5ad83a..4c9cfe6281c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStore.java @@ -164,8 +164,9 @@ public class TestMemStore extends TestCase { /** * A simple test which verifies the 3 possible states when scanning across snapshot. * @throws IOException + * @throws CloneNotSupportedException */ - public void testScanAcrossSnapshot2() throws IOException { + public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException { // we are going to the scanning across snapshot with two kvs // kv1 should always be returned before kv2 final byte[] one = Bytes.toBytes(1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java index 4e9a2f65284..a40a0d5c693 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java @@ -32,7 +32,6 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; @Category(MediumTests.class) public class TestRegionServerMetrics {