HBASE-8101 Cleanup: findbugs and javadoc warning fixes as well as making it illegal passing null row to Put/Delete, etc.
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457024 13f79535-47bb-0310-9956-ffa450edef68
parent 551c04f173
commit 466902ffde
@@ -48,7 +48,7 @@ import java.net.UnknownHostException;
/**
 * Tracks the availability of the catalog tables
 * <code>.META.</code>.
 *
 *
 * This class is "read-only" in that the locations of the catalog tables cannot
 * be explicitly set. Instead, ZooKeeper is used to learn of the availability
 * and location of <code>.META.</code>.
@ -65,7 +65,7 @@ public class CatalogTracker {
|
|||
// servers when they needed to know of meta movement but also by
|
||||
// client-side (inside in HTable) so rather than figure meta
|
||||
// locations on fault, the client would instead get notifications out of zk.
|
||||
//
|
||||
//
|
||||
// But this original intent is frustrated by the fact that this class has to
|
||||
// read an hbase table, the -ROOT- table, to figure out the .META. region
|
||||
// location which means we depend on an HConnection. HConnection will do
|
||||
|
@ -110,13 +110,6 @@ public class CatalogTracker {
|
|||
private boolean instantiatedzkw = false;
|
||||
private Abortable abortable;
|
||||
|
||||
/*
|
||||
* Do not clear this address once set. Its needed when we do
|
||||
* server shutdown processing -- we need to know who had .META. last. If you
|
||||
* want to know if the address is good, rely on {@link #metaAvailable} value.
|
||||
*/
|
||||
private ServerName metaLocation;
|
||||
|
||||
private boolean stopped = false;
|
||||
|
||||
static final byte [] META_REGION_NAME =
|
||||
|
@ -147,7 +140,7 @@ public class CatalogTracker {
|
|||
* @param abortable If fatal exception we'll call abort on this. May be null.
|
||||
* If it is we'll use the Connection associated with the passed
|
||||
* {@link Configuration} as our Abortable.
|
||||
* @throws IOException
|
||||
* @throws IOException
|
||||
*/
|
||||
public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
|
||||
Abortable abortable)
|
||||
|
@ -193,7 +186,7 @@ public class CatalogTracker {
|
|||
* Determines current availability of catalog tables and ensures all further
|
||||
* transitions of either region are tracked.
|
||||
* @throws IOException
|
||||
* @throws InterruptedException
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public void start() throws IOException, InterruptedException {
|
||||
LOG.debug("Starting catalog tracker " + this);
|
||||
|
@ -235,7 +228,7 @@ public class CatalogTracker {
|
|||
* not currently available.
|
||||
* @return {@link ServerName} for server hosting <code>.META.</code> or null
|
||||
* if none available
|
||||
* @throws InterruptedException
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public ServerName getMetaLocation() throws InterruptedException {
|
||||
return this.metaRegionTracker.getMetaRegionLocation();
|
||||
|
@ -309,8 +302,6 @@ public class CatalogTracker {
|
|||
LOG.info(".META. still not available, sleeping and retrying." +
|
||||
" Reason: " + e.getMessage());
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.info("Retrying", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -356,7 +347,7 @@ public class CatalogTracker {
|
|||
} else {
|
||||
throw ioe;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
return protocol;
|
||||
}
|
||||
|
@ -406,7 +397,7 @@ public class CatalogTracker {
|
|||
}
|
||||
}
|
||||
LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
|
||||
" at address=" + address + "; " + t);
|
||||
" at address=" + address + ", exception=" + t);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -416,7 +407,7 @@ public class CatalogTracker {
|
|||
* the internal call to {@link #waitForMetaServerConnection(long)}.
|
||||
* @return True if the <code>.META.</code> location is healthy.
|
||||
* @throws IOException
|
||||
* @throws InterruptedException
|
||||
* @throws InterruptedException
|
||||
*/
|
||||
public boolean verifyMetaRegionLocation(final long timeout)
|
||||
throws InterruptedException, IOException {
|
||||
|
|
|
@ -62,6 +62,11 @@ public class Action<R> implements Comparable<R> {
|
|||
return action.compareTo(((Action) o).getAction());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return this.action.hashCode();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (this == obj) return true;
|
||||
|
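Note: the hashCode() added in this hunk exists to honor the equals()/hashCode() contract that findbugs flags; equal objects must hash alike or hashed collections silently mis-file them. A minimal illustration in plain Java (the RowKey class below is invented for the example and assumes java.util.Set/HashSet are imported):

class RowKey {                                   // illustrative only, not part of this patch
  final String value;
  RowKey(String value) { this.value = value; }
  @Override public boolean equals(Object o) {
    return o instanceof RowKey && ((RowKey) o).value.equals(value);
  }
  @Override public int hashCode() {              // drop this override and the contains()
    return value.hashCode();                     // check below silently becomes false
  }
}

Set<RowKey> set = new HashSet<RowKey>();
set.add(new RowKey("r1"));
assert set.contains(new RowKey("r1"));           // true only because hashCode matches equals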
|
|
@ -17,6 +17,9 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
|
@ -24,10 +27,6 @@ import org.apache.hadoop.hbase.KeyValue;
|
|||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Performs Append operations on a single row.
|
||||
* <p>
|
||||
|
@@ -66,10 +65,22 @@ public class Append extends Mutation {
 * Create an Append operation for the specified row.
 * <p>
 * At least one column must be appended to.
 * @param row row key
 * @param row row key; makes a local copy of passed in array.
 */
public Append(byte[] row) {
  this.row = Arrays.copyOf(row, row.length);
  this(row, 0, row.length);
}

/** Create an Append operation for the specified row.
 * <p>
 * At least one column must be appended to.
 * @param rowArray Makes a copy out of this buffer.
 * @param rowOffset
 * @param rowLength
 */
public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
  checkRow(rowArray, rowOffset, rowLength);
  this.row = Bytes.copy(rowArray, rowOffset, rowLength);
}
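The new offset/length constructor lets a caller whose row sits inside a larger buffer skip the manual slice; Append performs the single defensive copy itself via Bytes.copy(). A hedged usage sketch (buffer contents and column names are made up; assumes the usual Append/Bytes/Arrays imports):

byte [] buffer = Bytes.toBytes("prefix-row1-suffix");  // the row lives inside a bigger array
int rowOffset = 7, rowLength = 4;                      // points at "row1"

// Old style: materialize the row before constructing the Append.
Append a1 = new Append(Arrays.copyOfRange(buffer, rowOffset, rowOffset + rowLength));

// New style: pass the buffer plus coordinates; the row is copied once inside the constructor.
Append a2 = new Append(buffer, rowOffset, rowLength);
a2.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));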
|
||||
/**
|
||||
|
|
|
@ -19,6 +19,11 @@
|
|||
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
|
@ -26,11 +31,6 @@ import org.apache.hadoop.hbase.HConstants;
|
|||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Used to perform Delete operations on a single row.
|
||||
* <p>
|
||||
|
@@ -91,8 +91,27 @@ public class Delete extends Mutation implements Comparable<Row> {
 * @param timestamp maximum version timestamp (only for delete row)
 */
public Delete(byte [] row, long timestamp) {
  this.row = row;
  this.ts = timestamp;
  this(row, 0, row.length, timestamp);
}

/**
 * Create a Delete operation for the specified row and timestamp.<p>
 *
 * If no further operations are done, this will delete all columns in all
 * families of the specified row with a timestamp less than or equal to the
 * specified timestamp.<p>
 *
 * This timestamp is ONLY used for a delete row operation. If specifying
 * families or columns, you must specify each timestamp individually.
 * @param rowArray We make a local copy of this passed in row.
 * @param rowOffset
 * @param rowLength
 * @param ts maximum version timestamp (only for delete row)
 */
public Delete(final byte [] rowArray, final int rowOffset, final int rowLength, long ts) {
  checkRow(rowArray, rowOffset, rowLength);
  this.row = Bytes.copy(rowArray, rowOffset, rowLength);
  this.ts = ts;
}
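With the constructors now funneled through checkRow(), passing a null or empty row to Delete (and likewise Put, Append, Increment, Get) is illegal and fails at construction time rather than later on the server. A behaviour sketch (assumes org.junit.Assert.fail for the test-style phrasing):

try {
  Delete d = new Delete(new byte [] {});   // empty row: previously tolerated, now rejected
  fail("expected IllegalArgumentException");
} catch (IllegalArgumentException expected) {
  // checkRow(): "Row length is 0"
}
// A null row is rejected as well; depending on which constructor is hit it surfaces as an
// IllegalArgumentException ("Row buffer is null") or a NullPointerException.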
|
||||
/**
|
||||
|
@@ -121,10 +140,8 @@ public class Delete extends Mutation implements Comparable<Row> {
}
if (Bytes.compareTo(this.row, 0, row.length, kv.getBuffer(),
    kv.getRowOffset(), kv.getRowLength()) != 0) {
  throw new IOException("The row in the recently added KeyValue "
      + Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(),
        kv.getRowLength()) + " doesn't match the original one "
      + Bytes.toStringBinary(this.row));
  throw new WrongRowIOException("The row in " + kv.toString() +
      " doesn't match the original one " + Bytes.toStringBinary(this.row));
}
byte [] family = kv.getFamily();
List<? extends Cell> list = familyMap.get(family);
|
|
@ -19,14 +19,6 @@
|
|||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.filter.Filter;
|
||||
import org.apache.hadoop.hbase.io.TimeRange;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
|
@ -37,6 +29,14 @@ import java.util.Set;
|
|||
import java.util.TreeMap;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.filter.Filter;
|
||||
import org.apache.hadoop.hbase.io.TimeRange;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Used to perform Get operations on a single row.
|
||||
* <p>
|
||||
|
@@ -83,6 +83,7 @@ public class Get extends OperationWithAttributes
 * @param row row key
 */
public Get(byte [] row) {
  Mutation.checkRow(row);
  this.row = row;
}
||||
|
@@ -388,9 +389,17 @@ public class Get extends OperationWithAttributes
//Row
@Override
public int compareTo(Row other) {
  // TODO: This is wrong. Can't have two gets the same just because on same row.
  return Bytes.compareTo(this.getRow(), other.getRow());
}

@Override
public int hashCode() {
  // TODO: This is wrong. Can't have two gets the same just because on same row. But it
  // matches how equals works currently and gets rid of the findbugs warning.
  return Bytes.hashCode(this.getRow());
}

@Override
public boolean equals(Object obj) {
  if (this == obj) {
@@ -400,6 +409,7 @@ public class Get extends OperationWithAttributes
    return false;
  }
  Row other = (Row) obj;
  // TODO: This is wrong. Can't have two gets the same just because on same row.
  return compareTo(other) == 0;
}
}
}
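As the TODOs admit, compareTo()/equals()/hashCode() on Get look only at the row, so two Gets that ask for different columns still compare equal. A small illustration of the caveat (column names invented; assumes java.util.Set/HashSet and Bytes are imported):

Get g1 = new Get(Bytes.toBytes("row1"));
g1.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"));
Get g2 = new Get(Bytes.toBytes("row1"));
g2.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("b"));

Set<Get> gets = new HashSet<Get>();
gets.add(g1);
gets.add(g2);              // collapses with g1: same row means equal and same hashCode
assert gets.size() == 1;   // only one of the two distinct requests survives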
@ -1350,7 +1350,7 @@ public class HBaseAdmin implements Abortable, Closeable {
|
|||
throws IOException, InterruptedException {
|
||||
compact(tableNameOrRegionName, null, false);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Compact a column family within a table or region.
|
||||
* Asynchronous operation.
|
||||
|
@ -1404,7 +1404,7 @@ public class HBaseAdmin implements Abortable, Closeable {
|
|||
throws IOException, InterruptedException {
|
||||
compact(tableNameOrRegionName, null, true);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Major compact a column family within a table or region.
|
||||
* Asynchronous operation.
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.NavigableMap;
|
||||
|
@ -28,7 +27,6 @@ import java.util.TreeMap;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.io.TimeRange;
|
||||
|
@@ -52,17 +50,47 @@ public class Increment extends Mutation implements Comparable<Row> {
private TimeRange tr = new TimeRange();

/**
 * Create an Increment operation for the specified row, using an existing row
 * lock.
 * Create an Increment operation for the specified row.
 * <p>
 * At least one column must be incremented.
 * @param row row key
 * @param row row key (we will make a copy of this).
 */
public Increment(byte [] row) {
  if (row == null || row.length > HConstants.MAX_ROW_LENGTH) {
    throw new IllegalArgumentException("Row key is invalid");
  this(row, 0, row.length);
}

/**
 * Create an Increment operation for the specified row.
 * <p>
 * At least one column must be incremented.
 * @param row row key (we will make a copy of this).
 */
public Increment(final byte [] row, final int offset, final int length) {
  checkRow(row, offset, length);
  this.row = Bytes.copy(row, offset, length);
}
||||
/**
|
||||
* Add the specified KeyValue to this operation.
|
||||
* @param cell individual Cell
|
||||
* @return this
|
||||
* @throws java.io.IOException e
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public Increment add(Cell cell) throws IOException{
|
||||
KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
|
||||
byte [] family = kv.getFamily();
|
||||
List<? extends Cell> list = getCellList(family);
|
||||
//Checking that the row of the kv is the same as the put
|
||||
int res = Bytes.compareTo(this.row, 0, row.length,
|
||||
kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
|
||||
if (res != 0) {
|
||||
throw new WrongRowIOException("The row in " + kv.toString() +
|
||||
" doesn't match the original one " + Bytes.toStringBinary(this.row));
|
||||
}
|
||||
this.row = Arrays.copyOf(row, row.length);
|
||||
((List<KeyValue>)list).add(kv);
|
||||
familyMap.put(family, list);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -204,11 +232,20 @@ public class Increment extends Mutation implements Comparable<Row> {
|
|||
|
||||
@Override
|
||||
public int compareTo(Row i) {
|
||||
// TODO: This is wrong. Can't have two the same just because on same row.
|
||||
return Bytes.compareTo(this.getRow(), i.getRow());
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// TODO: This is wrong. Can't have two gets the same just because on same row. But it
|
||||
// matches how equals works currently and gets rid of the findbugs warning.
|
||||
return Bytes.hashCode(this.getRow());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
// TODO: This is wrong. Can't have two the same just because on same row.
|
||||
if (this == obj) {
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -250,6 +250,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
|
|||
}
|
||||
|
||||
/**
|
||||
* Number of KeyValues carried by this Mutation.
|
||||
* @return the total number of KeyValues
|
||||
*/
|
||||
public int size() {
|
||||
|
@@ -299,4 +300,36 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
  heapsize += getAttributeSize();
  return heapsize;
}
}

/**
 * @param row Row to check
 * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or
 * > {@link HConstants#MAX_ROW_LENGTH}
 * @return <code>row</code>
 */
static byte [] checkRow(final byte [] row) {
  return checkRow(row, 0, row == null? 0: row.length);
}

/**
 * @param row Row to check
 * @param offset
 * @param length
 * @throws IllegalArgumentException Thrown if <code>row</code> is empty or null or
 * > {@link HConstants#MAX_ROW_LENGTH}
 * @return <code>row</code>
 */
static byte [] checkRow(final byte [] row, final int offset, final int length) {
  if (row == null) {
    throw new IllegalArgumentException("Row buffer is null");
  }
  if (length == 0) {
    throw new IllegalArgumentException("Row length is 0");
  }
  if (length > HConstants.MAX_ROW_LENGTH) {
    throw new IllegalArgumentException("Row length " + length + " is > " +
      HConstants.MAX_ROW_LENGTH);
  }
  return row;
}
}
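checkRow() is now the single validation choke point behind the Put/Delete/Append/Increment constructors (Get calls Mutation.checkRow directly). A behaviour sketch of the package-private helpers; calls like these are only legal from inside org.apache.hadoop.hbase.client:

byte [] ok = Bytes.toBytes("r1");
Mutation.checkRow(ok);                     // passes and returns the row unchanged
// Each of the following throws IllegalArgumentException:
//   Mutation.checkRow(null)               -> "Row buffer is null"
//   Mutation.checkRow(new byte [0])       -> "Row length is 0"
//   Mutation.checkRow(new byte [HConstants.MAX_ROW_LENGTH + 1])
//                                         -> "Row length ... is > " + MAX_ROW_LENGTH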
@@ -57,14 +57,23 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
/**
 * Create a Put operation for the specified row, using a given timestamp.
 *
 * @param row row key
 * @param row row key; we make a copy of what we are passed to keep local.
 * @param ts timestamp
 */
public Put(byte[] row, long ts) {
  if (row == null || row.length > HConstants.MAX_ROW_LENGTH) {
    throw new IllegalArgumentException("Row key is invalid");
  }
  this.row = Arrays.copyOf(row, row.length);
  this(row, 0, row.length, ts);
}

/**
 * We make a copy of the passed in row key to keep local.
 * @param rowArray
 * @param rowOffset
 * @param rowLength
 * @param ts
 */
public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) {
  checkRow(rowArray, rowOffset, rowLength);
  this.row = Bytes.copy(rowArray, rowOffset, rowLength);
  this.ts = ts;
}
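Both Put constructors copy the row (Arrays.copyOf before, Bytes.copy now), so the caller's buffer can be reused immediately after construction. A short sketch of why the defensive copy matters (row values are illustrative):

byte [] reusableBuffer = Bytes.toBytes("row-A");
Put p = new Put(reusableBuffer, 0, reusableBuffer.length, System.currentTimeMillis());
reusableBuffer[4] = 'B';                                  // caller recycles the buffer...
assert Bytes.equals(p.getRow(), Bytes.toBytes("row-A"));  // ...but the Put still targets row-A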
||||
|
@@ -125,11 +134,9 @@ public class Put extends Mutation implements HeapSize, Comparable<Row> {
//Checking that the row of the kv is the same as the put
int res = Bytes.compareTo(this.row, 0, row.length,
    kv.getBuffer(), kv.getRowOffset(), kv.getRowLength());
if(res != 0) {
  throw new IOException("The row in the recently added KeyValue " +
      Bytes.toStringBinary(kv.getBuffer(), kv.getRowOffset(),
        kv.getRowLength()) + " doesn't match the original one " +
      Bytes.toStringBinary(this.row));
if (res != 0) {
  throw new WrongRowIOException("The row in " + kv.toString() +
      " doesn't match the original one " + Bytes.toStringBinary(this.row));
}
((List<KeyValue>)list).add(kv);
familyMap.put(family, list);
|
|
@ -17,23 +17,26 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Performs multiple mutations atomically on a single row.
|
||||
* Currently {@link Put} and {@link Delete} are supported.
|
||||
*
|
||||
* The mutations are performed in the order in which they
|
||||
* were added.
|
||||
*
|
||||
* <p>We compare and equate mutations based off their row so be careful putting RowMutations
|
||||
* into Sets or using them as keys in Maps.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
|
@ -88,6 +91,16 @@ public class RowMutations implements Row {
|
|||
return Bytes.compareTo(this.getRow(), i.getRow());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this) return true;
|
||||
if (obj instanceof RowMutations) {
|
||||
RowMutations other = (RowMutations)obj;
|
||||
return compareTo(other) == 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public byte[] getRow() {
|
||||
return row;
|
||||
|
|
|
@@ -0,0 +1,28 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.exceptions.HBaseIOException;

public class WrongRowIOException extends HBaseIOException {
  private static final long serialVersionUID = -5849522209440123059L;

  public WrongRowIOException(final String msg) {
    super(msg);
  }
}
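WrongRowIOException gives callers a typed signal for the row-mismatch case when adding a pre-built KeyValue to a Put/Delete/Increment, instead of string-matching a generic IOException. A hedged sketch (assumes Put exposes an add(KeyValue) overload declaring throws IOException, as the hunks above suggest):

void rowMismatch() throws IOException {
  Put put = new Put(Bytes.toBytes("row-A"));
  KeyValue wrongRow = new KeyValue(Bytes.toBytes("row-B"), Bytes.toBytes("cf"),
      Bytes.toBytes("q"), Bytes.toBytes("v"));
  try {
    put.add(wrongRow);
  } catch (WrongRowIOException e) {
    // typed handling of the mismatch; previously only the message distinguished it
  }
}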
@ -17,7 +17,6 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.exceptions;
|
||||
|
||||
import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
|
||||
|
||||
/**
|
||||
* Exception thrown by access-related methods.
|
||||
|
|
|
@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.exceptions;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
|
||||
|
||||
/**
|
||||
* Thrown if a coprocessor encounters any exception.
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
package org.apache.hadoop.hbase.exceptions;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
|
||||
|
||||
/**
|
||||
* This exception is thrown when attempts to read an HFile fail due to corruption or truncation
|
||||
|
|
|
@ -56,4 +56,4 @@ public class DoNotRetryIOException extends HBaseIOException {
|
|||
public DoNotRetryIOException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -19,7 +19,6 @@
|
|||
package org.apache.hadoop.hbase.exceptions;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.exceptions.DoNotRetryIOException;
|
||||
|
||||
/**
|
||||
* Reports a problem with a lease
|
||||
|
|
|
@ -23,6 +23,7 @@ import com.google.protobuf.InvalidProtocolBufferException;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.exceptions.DeserializationException;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;
|
||||
|
||||
/**
|
||||
|
@ -47,6 +48,11 @@ public class NullComparator extends ByteArrayComparable {
|
|||
return obj == null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(byte[] value, int offset, int length) {
|
||||
throw new UnsupportedOperationException();
|
||||
|
@ -69,9 +75,9 @@ public class NullComparator extends ByteArrayComparable {
|
|||
*/
|
||||
public static NullComparator parseFrom(final byte [] pbBytes)
|
||||
throws DeserializationException {
|
||||
ComparatorProtos.NullComparator proto;
|
||||
try {
|
||||
proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
|
||||
@SuppressWarnings("unused")
|
||||
ComparatorProtos.NullComparator proto = ComparatorProtos.NullComparator.parseFrom(pbBytes);
|
||||
} catch (InvalidProtocolBufferException e) {
|
||||
throw new DeserializationException(e);
|
||||
}
|
||||
|
|
|
@@ -46,7 +46,7 @@ public enum AuthMethod {
private static final int FIRST_CODE = values()[0].code;

/** Return the object represented by the code. */
private static AuthMethod valueOf(byte code) {
public static AuthMethod valueOf(byte code) {
  final int i = (code & 0xff) - FIRST_CODE;
  return i < 0 || i >= values().length ? null : values()[i];
}
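Widening valueOf() to public exposes the existing decode; the (code & 0xff) mask is what keeps it correct for codes above 127, which arrive as negative Java bytes. A tiny illustration (the wire code value is hypothetical):

byte wireCode = (byte) 0x90;   // hypothetical auth code greater than 127
int signExtended = wireCode;   // -112: would index the values() table incorrectly
int masked = wireCode & 0xff;  // 144: the unsigned value the lookup actually needs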
|
|
@@ -1587,7 +1587,7 @@ public class ZKUtil {
try {
  getReplicationZnodesDump(zkw, sb);
} catch (KeeperException ke) {
  LOG.warn("Couldn't get the replication znode dump." + ke.getStackTrace());
  LOG.warn("Couldn't get the replication znode dump", ke);
}
sb.append("\nQuorum Server Statistics:");
String[] servers = zkw.getQuorum().split(",");
|
|
@ -29,9 +29,10 @@ import org.junit.experimental.categories.Category;
|
|||
|
||||
@Category(SmallTests.class)
|
||||
public class TestAttributes {
|
||||
private static final byte [] ROW = new byte [] {'r'};
|
||||
@Test
|
||||
public void testPutAttributes() {
|
||||
Put put = new Put(new byte [] {});
|
||||
Put put = new Put(ROW);
|
||||
Assert.assertTrue(put.getAttributesMap().isEmpty());
|
||||
Assert.assertNull(put.getAttribute("absent"));
|
||||
|
||||
|
@ -79,7 +80,7 @@ public class TestAttributes {
|
|||
|
||||
@Test
|
||||
public void testDeleteAttributes() {
|
||||
Delete del = new Delete(new byte [] {});
|
||||
Delete del = new Delete(new byte [] {'r'});
|
||||
Assert.assertTrue(del.getAttributesMap().isEmpty());
|
||||
Assert.assertNull(del.getAttribute("absent"));
|
||||
|
||||
|
@ -126,7 +127,7 @@ public class TestAttributes {
|
|||
|
||||
@Test
|
||||
public void testGetId() {
|
||||
Get get = new Get(null);
|
||||
Get get = new Get(ROW);
|
||||
Assert.assertNull("Make sure id is null if unset", get.toMap().get("id"));
|
||||
get.setId("myId");
|
||||
Assert.assertEquals("myId", get.toMap().get("id"));
|
||||
|
@ -134,7 +135,7 @@ public class TestAttributes {
|
|||
|
||||
@Test
|
||||
public void testAppendId() {
|
||||
Append append = new Append(Bytes.toBytes("testRow"));
|
||||
Append append = new Append(ROW);
|
||||
Assert.assertNull("Make sure id is null if unset", append.toMap().get("id"));
|
||||
append.setId("myId");
|
||||
Assert.assertEquals("myId", append.toMap().get("id"));
|
||||
|
@ -142,7 +143,7 @@ public class TestAttributes {
|
|||
|
||||
@Test
|
||||
public void testDeleteId() {
|
||||
Delete delete = new Delete(new byte [] {});
|
||||
Delete delete = new Delete(ROW);
|
||||
Assert.assertNull("Make sure id is null if unset", delete.toMap().get("id"));
|
||||
delete.setId("myId");
|
||||
Assert.assertEquals("myId", delete.toMap().get("id"));
|
||||
|
@ -150,7 +151,7 @@ public class TestAttributes {
|
|||
|
||||
@Test
|
||||
public void testPutId() {
|
||||
Put put = new Put(new byte [] {});
|
||||
Put put = new Put(ROW);
|
||||
Assert.assertNull("Make sure id is null if unset", put.toMap().get("id"));
|
||||
put.setId("myId");
|
||||
Assert.assertEquals("myId", put.toMap().get("id"));
|
||||
|
@ -163,6 +164,4 @@ public class TestAttributes {
|
|||
scan.setId("myId");
|
||||
Assert.assertEquals("myId", scan.toMap().get("id"));
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -34,6 +34,7 @@ import org.junit.experimental.categories.Category;
|
|||
// TODO: cover more test cases
|
||||
@Category(SmallTests.class)
|
||||
public class TestGet {
|
||||
private static final byte [] ROW = new byte [] {'r'};
|
||||
@Test
|
||||
public void testAttributesSerialization() throws IOException {
|
||||
Get get = new Get(Bytes.toBytes("row"));
|
||||
|
@ -53,7 +54,7 @@ public class TestGet {
|
|||
|
||||
@Test
|
||||
public void testGetAttributes() {
|
||||
Get get = new Get(null);
|
||||
Get get = new Get(ROW);
|
||||
Assert.assertTrue(get.getAttributesMap().isEmpty());
|
||||
Assert.assertNull(get.getAttribute("absent"));
|
||||
|
||||
|
@ -100,11 +101,10 @@ public class TestGet {
|
|||
|
||||
@Test
|
||||
public void testNullQualifier() {
|
||||
Get get = new Get(null);
|
||||
Get get = new Get(ROW);
|
||||
byte[] family = Bytes.toBytes("family");
|
||||
get.addColumn(family, null);
|
||||
Set<byte[]> qualifiers = get.getFamilyMap().get(family);
|
||||
Assert.assertEquals(1, qualifiers.size());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -34,6 +34,9 @@ import com.google.common.primitives.Longs;
|
|||
* regionname, from row. See KeyValue for how it has a special comparator to do .META. cells
|
||||
* and yet another for -ROOT-.
|
||||
*/
|
||||
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
|
||||
value="UNKNOWN",
|
||||
justification="Findbugs doesn't like the way we are negating the result of a compare in below")
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Evolving
|
||||
public class CellComparator implements Comparator<Cell>, Serializable{
|
||||
|
|
|
@@ -872,9 +872,11 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
/**
 * Clones a KeyValue. This creates a copy, re-allocating the buffer.
 * @return Fully copied clone of this KeyValue
 * @throws CloneNotSupportedException
 */
@Override
public KeyValue clone() {
public KeyValue clone() throws CloneNotSupportedException {
  super.clone();
  byte [] b = new byte[this.length];
  System.arraycopy(this.bytes, this.offset, b, 0, this.length);
  KeyValue ret = new KeyValue(b, 0, b.length);
@ -885,15 +887,6 @@ public class KeyValue implements Cell, HeapSize, Cloneable {
|
|||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a deep copy of this KeyValue, re-allocating the buffer.
|
||||
* Same function as {@link #clone()}. Added for clarity vs shallowCopy()
|
||||
* @return Deep copy of this KeyValue
|
||||
*/
|
||||
public KeyValue deepCopy() {
|
||||
return clone();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a shallow copy of this KeyValue, reusing the data byte buffer.
|
||||
* http://en.wikipedia.org/wiki/Object_copy
|
||||
|
|
|
@ -22,12 +22,12 @@ import java.io.InputStream;
|
|||
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
|
||||
abstract class BaseDecoder implements Codec.Decoder {
|
||||
final InputStream in;
|
||||
public abstract class BaseDecoder implements Codec.Decoder {
|
||||
protected final InputStream in;
|
||||
private boolean hasNext = true;
|
||||
private Cell current = null;
|
||||
|
||||
BaseDecoder(final InputStream in) {
|
||||
public BaseDecoder(final InputStream in) {
|
||||
this.in = in;
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ abstract class BaseDecoder implements Codec.Decoder {
|
|||
* @return extract a Cell
|
||||
* @throws IOException
|
||||
*/
|
||||
abstract Cell parseCell() throws IOException;
|
||||
protected abstract Cell parseCell() throws IOException;
|
||||
|
||||
@Override
|
||||
public Cell current() {
|
||||
|
|
|
@ -22,7 +22,7 @@ import java.io.OutputStream;
|
|||
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
|
||||
abstract class BaseEncoder implements Codec.Encoder {
|
||||
public abstract class BaseEncoder implements Codec.Encoder {
|
||||
protected final OutputStream out;
|
||||
// This encoder is 'done' once flush has been called.
|
||||
protected boolean flushed = false;
|
||||
|
@ -34,7 +34,7 @@ abstract class BaseEncoder implements Codec.Encoder {
|
|||
@Override
|
||||
public abstract void write(Cell cell) throws IOException;
|
||||
|
||||
void checkFlushed() throws CodecException {
|
||||
protected void checkFlushed() throws CodecException {
|
||||
if (this.flushed) throw new CodecException("Flushed; done");
|
||||
}
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ public class CellCodec implements Codec {
|
|||
super(in);
|
||||
}
|
||||
|
||||
Cell parseCell() throws IOException {
|
||||
protected Cell parseCell() throws IOException {
|
||||
byte [] row = readByteArray(this.in);
|
||||
byte [] family = readByteArray(in);
|
||||
byte [] qualifier = readByteArray(in);
|
||||
|
|
|
@ -66,7 +66,7 @@ public class KeyValueCodec implements Codec {
|
|||
super(in);
|
||||
}
|
||||
|
||||
Cell parseCell() throws IOException {
|
||||
protected Cell parseCell() throws IOException {
|
||||
return KeyValue.iscreate(in);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.util;
|
||||
package org.apache.hadoop.hbase.io;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
|
@ -27,6 +27,7 @@ import java.nio.channels.WritableByteChannel;
|
|||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
||||
/**
|
||||
* Not thread safe!
|
|
@@ -209,8 +209,9 @@ public class DiffKeyDeltaEncoder extends BufferedDataBlockEncoder {
state.familyNameWithSize =
    new byte[(state.familyLength & 0xff) + KeyValue.FAMILY_LENGTH_SIZE];
state.familyNameWithSize[0] = state.familyLength;
source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
int read = source.read(state.familyNameWithSize, KeyValue.FAMILY_LENGTH_SIZE,
    state.familyLength);
assert read == state.familyLength;
}
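Capturing the return value silences the findbugs warning, but InputStream.read() may still return fewer bytes than requested, and the assert only fires with -ea. A hedged sketch of a stricter read-fully loop, should partial reads ever matter here (plain java.io, not an HBase API):

static void readFully(InputStream in, byte [] buf, int off, int len) throws IOException {
  int done = 0;
  while (done < len) {
    int n = in.read(buf, off + done, len - done);
    if (n < 0) throw new IOException("Premature EOF after " + done + " of " + len + " bytes");
    done += n;                        // loop until the full count arrives or EOF
  }
}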
|
||||
// read flag
|
||||
|
|
|
@ -16,14 +16,10 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.io.encoding;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.DataInputStream;
|
||||
import java.io.DataOutputStream;
|
||||
import java.io.FilterOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Field;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
|
|
|
@@ -1672,6 +1672,21 @@ public class Bytes {
  return result;
}

/**
 * Copy the given range of the byte array passed in parameter and return an instance
 * of a new byte array with the requested length and the same content.
 * @param bytes the byte array to copy from
 * @param offset where the range to copy starts
 * @param length how many bytes to copy
 * @return a copy of the designated range of the given byte array
 */
public static byte [] copy(byte [] bytes, final int offset, final int length) {
  if (bytes == null) return null;
  byte [] result = new byte[length];
  System.arraycopy(bytes, offset, result, 0, length);
  return result;
}
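Bytes.copy(bytes, offset, length) is the small utility the new offset/length constructors lean on; it behaves like Arrays.copyOfRange over [offset, offset + length) but returns null for a null input instead of throwing. A short equivalence sketch:

byte [] b = Bytes.toBytes("prefix-row1");
byte [] viaBytes  = Bytes.copy(b, 7, 4);             // "row1"
byte [] viaArrays = Arrays.copyOfRange(b, 7, 11);    // "row1"
assert Bytes.equals(viaBytes, viaArrays);
assert Bytes.copy(null, 0, 0) == null;               // null-tolerant, unlike copyOfRange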
|
||||
/**
|
||||
* Search sorted array "a" for byte "key". I can't remember if I wrote this or copied it from
|
||||
* somewhere. (mcorgan)
|
||||
|
|
|
@ -34,6 +34,9 @@ import com.google.common.primitives.Bytes;
|
|||
* Generate list of key values which are very useful to test data block encoding
|
||||
* and compression.
|
||||
*/
|
||||
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
|
||||
value="RV_ABSOLUTE_VALUE_OF_RANDOM_INT",
|
||||
justification="Should probably fix")
|
||||
public class RedundantKVGenerator {
|
||||
// row settings
|
||||
static byte[] DEFAULT_COMMON_PREFIX = new byte[0];
|
||||
|
|
|
@ -183,7 +183,6 @@ public class DemoClient {
|
|||
client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes);
|
||||
|
||||
// non-utf8 is now allowed in row names because HBase stores values as binary
|
||||
ByteBuffer bf = ByteBuffer.wrap(invalid);
|
||||
|
||||
mutations = new ArrayList<Mutation>();
|
||||
mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal));
|
||||
|
|
|
@ -137,6 +137,13 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
|
|||
.compareTo(impl.regionWrapper.getRegionName());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this) return true;
|
||||
if (!(obj instanceof MetricsRegionSourceImpl)) return false;
|
||||
return compareTo((MetricsRegionSourceImpl)obj) == 0;
|
||||
}
|
||||
|
||||
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
|
||||
if (closed) return;
|
||||
|
||||
|
|
|
@ -36,6 +36,9 @@ import java.util.concurrent.TimeUnit;
|
|||
* This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
|
||||
* are package private.
|
||||
*/
|
||||
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
|
||||
value="LI_LAZY_INIT_STATIC",
|
||||
justification="Yeah, its weird but its what we want")
|
||||
public class JmxCacheBuster {
|
||||
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
|
||||
private static Object lock = new Object();
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.regionserver.MetricsRegionSourceImpl;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
import org.apache.hadoop.metrics2.impl.JmxCacheBuster;
|
||||
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
|
||||
|
@ -138,6 +139,13 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
|
|||
.compareTo(impl.regionWrapper.getRegionName());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == this) return true;
|
||||
if (!(obj instanceof MetricsRegionSourceImpl)) return false;
|
||||
return compareTo((MetricsRegionSourceImpl)obj) == 0;
|
||||
}
|
||||
|
||||
void snapshot(MetricsRecordBuilder mrb, boolean ignored) {
|
||||
if (closed) return;
|
||||
|
||||
|
|
|
@ -35,6 +35,9 @@ import java.util.concurrent.TimeUnit;
|
|||
* This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used
|
||||
* are package private.
|
||||
*/
|
||||
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
|
||||
value="LI_LAZY_INIT_STATIC",
|
||||
justification="Yeah, its weird but its what we want")
|
||||
public class JmxCacheBuster {
|
||||
private static final Log LOG = LogFactory.getLog(JmxCacheBuster.class);
|
||||
private static Object lock = new Object();
|
||||
|
|
|
@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.HConstants;
|
|||
import org.apache.hadoop.hbase.IpcProtocol;
|
||||
import org.apache.hadoop.hbase.exceptions.CallerDisconnectedException;
|
||||
import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
|
||||
import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
|
||||
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
|
||||
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.ConnectionHeader;
|
||||
|
@ -86,7 +87,6 @@ import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandle
|
|||
import org.apache.hadoop.hbase.security.SaslStatus;
|
||||
import org.apache.hadoop.hbase.security.SaslUtil;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
import org.apache.hadoop.hbase.util.ByteBufferOutputStream;
|
||||
import org.apache.hadoop.io.BytesWritable;
|
||||
import org.apache.hadoop.io.IntWritable;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
|
|
|
@ -100,9 +100,9 @@ public class ImportTsv extends Configured implements Tool {
|
|||
|
||||
private int timestampKeyColumnIndex = DEFAULT_TIMESTAMP_COLUMN_INDEX;
|
||||
|
||||
public static String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY";
|
||||
public static final String ROWKEY_COLUMN_SPEC = "HBASE_ROW_KEY";
|
||||
|
||||
public static String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY";
|
||||
public static final String TIMESTAMPKEY_COLUMN_SPEC = "HBASE_TS_KEY";
|
||||
|
||||
/**
|
||||
* @param columnsSpecification the list of columns to parser out, comma separated.
|
||||
|
|
|
@@ -41,7 +41,11 @@ public class KeyValueSortReducer extends Reducer<ImmutableBytesWritable, KeyValu
    throws java.io.IOException, InterruptedException {
  TreeSet<KeyValue> map = new TreeSet<KeyValue>(KeyValue.COMPARATOR);
  for (KeyValue kv: kvs) {
    map.add(kv.clone());
    try {
      map.add(kv.clone());
    } catch (CloneNotSupportedException e) {
      throw new java.io.IOException(e);
    }
  }
  context.setStatus("Read " + map.getClass());
  int index = 0;
@@ -50,4 +54,4 @@ public class KeyValueSortReducer extends Reducer<ImmutableBytesWritable, KeyValu
    if (index > 0 && index % 100 == 0) context.setStatus("Wrote " + index);
  }
}
}
}
@ -105,15 +105,16 @@ public class DeleteTableHandler extends TableEventHandler {
|
|||
LOG.error("Couldn't delete " + tempTableDir);
|
||||
}
|
||||
|
||||
LOG.debug("Table '" + tableName + "' archived!");
|
||||
LOG.debug("Table '" + Bytes.toString(tableName) + "' archived!");
|
||||
} finally {
|
||||
String tableNameStr = Bytes.toString(tableName);
|
||||
// 6. Update table descriptor cache
|
||||
LOG.debug("Removing '" + tableName + "' descriptor.");
|
||||
LOG.debug("Removing '" + tableNameStr + "' descriptor.");
|
||||
this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
|
||||
|
||||
// 7. If entry for this table in zk, and up in AssignmentManager, remove it.
|
||||
LOG.debug("Marking '" + tableName + "' as deleted.");
|
||||
am.getZKTable().setDeletedTable(Bytes.toString(tableName));
|
||||
LOG.debug("Marking '" + tableNameStr + "' as deleted.");
|
||||
am.getZKTable().setDeletedTable(tableNameStr);
|
||||
}
|
||||
|
||||
if (cpHost != null) {
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package org.apache.hadoop.hbase.regionserver;
|
||||
|
||||
import java.io.EOFException;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
|
@ -67,24 +66,17 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hbase.CompoundConfiguration;
|
||||
import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException;
|
||||
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.CompoundConfiguration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
|
||||
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
|
||||
import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
|
||||
import org.apache.hadoop.hbase.exceptions.RegionTooBusyException;
|
||||
import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
|
||||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.backup.HFileArchiver;
|
||||
import org.apache.hadoop.hbase.client.Append;
|
||||
import org.apache.hadoop.hbase.client.Delete;
|
||||
|
@ -98,6 +90,13 @@ import org.apache.hadoop.hbase.client.Row;
|
|||
import org.apache.hadoop.hbase.client.RowMutations;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
|
||||
import org.apache.hadoop.hbase.exceptions.DroppedSnapshotException;
|
||||
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
|
||||
import org.apache.hadoop.hbase.exceptions.NoSuchColumnFamilyException;
|
||||
import org.apache.hadoop.hbase.exceptions.NotServingRegionException;
|
||||
import org.apache.hadoop.hbase.exceptions.RegionTooBusyException;
|
||||
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
|
||||
import org.apache.hadoop.hbase.exceptions.UnknownScannerException;
|
||||
import org.apache.hadoop.hbase.exceptions.WrongRegionException;
|
||||
import org.apache.hadoop.hbase.filter.ByteArrayComparable;
|
||||
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
|
||||
|
@ -110,7 +109,6 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
|
|||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.ipc.HBaseServer;
|
||||
import org.apache.hadoop.hbase.ipc.RpcCallContext;
|
||||
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
|
||||
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
|
||||
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
|
||||
|
@ -118,14 +116,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServic
|
|||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl.WriteEntry;
|
||||
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
|
||||
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLog;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
|
||||
import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.CancelableProgressable;
|
||||
import org.apache.hadoop.hbase.util.ClassSize;
|
||||
|
@@ -1670,6 +1666,10 @@ public class HRegion implements HeapSize { // , Writable{
  }
}

/**
 * Row needed by below method.
 */
private static final byte [] FOR_UNIT_TESTS_ONLY = Bytes.toBytes("ForUnitTestsOnly");
/**
 * This is used only by unit tests. Not required to be a public API.
 * @param familyMap map of family to edits for the given family.
@@ -1678,7 +1678,7 @@ public class HRegion implements HeapSize { // , Writable{
 */
void delete(NavigableMap<byte[], List<? extends Cell>> familyMap, UUID clusterId,
    boolean writeToWAL) throws IOException {
  Delete delete = new Delete(HConstants.EMPTY_BYTE_ARRAY);
  Delete delete = new Delete(FOR_UNIT_TESTS_ONLY);
  delete.setFamilyMap(familyMap);
  delete.setClusterId(clusterId);
  delete.setWriteToWAL(writeToWAL);
|
|
@ -51,7 +51,6 @@ import org.apache.thrift.TException;
|
|||
* thrift server dies or is shut down before everything in the queue is drained.
|
||||
*
|
||||
*/
|
||||
|
||||
public class IncrementCoalescer implements IncrementCoalescerMBean {
|
||||
|
||||
/**
|
||||
|
|
|
@ -261,8 +261,9 @@ public class TestZooKeeper {
|
|||
HTable ipMeta = new HTable(otherConf, HConstants.META_TABLE_NAME);
|
||||
|
||||
// dummy, just to open the connection
|
||||
localMeta.exists(new Get(HConstants.LAST_ROW));
|
||||
ipMeta.exists(new Get(HConstants.LAST_ROW));
|
||||
final byte [] row = new byte [] {'r'};
|
||||
localMeta.exists(new Get(row));
|
||||
ipMeta.exists(new Get(row));
|
||||
|
||||
// make sure they aren't the same
|
||||
ZooKeeperWatcher z1 =
|
||||
|
@@ -359,8 +360,26 @@ public class TestZooKeeper {
    "testMasterAddressManagerFromZK", null);

// Save the previous ACL
Stat s = new Stat();
List<ACL> oldACL = zk.getACL("/", s);
Stat s = null;
List<ACL> oldACL = null;
while (true) {
  try {
    s = new Stat();
    oldACL = zk.getACL("/", s);
    break;
  } catch (KeeperException e) {
    switch (e.code()) {
      case CONNECTIONLOSS:
      case SESSIONEXPIRED:
      case OPERATIONTIMEOUT:
        LOG.warn("Possibly transient ZooKeeper exception", e);
        Threads.sleep(100);
        break;
      default:
        throw e;
    }
  }
}

// I set this acl after the attempted creation of the cluster home node.
// Add retries in case of retryable zk exceptions.
|
|
@ -33,6 +33,9 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.hbase.*;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.ScannerCallable;
|
||||
import org.apache.hadoop.hbase.ipc.HBaseClient;
|
||||
import org.apache.hadoop.hbase.ipc.HBaseServer;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
|
||||
|
@ -40,6 +43,8 @@ import org.junit.AfterClass;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
import org.apache.commons.logging.impl.Log4JLogger;
|
||||
import org.apache.log4j.Level;
|
||||
|
||||
/**
|
||||
* Test {@link MetaReader}, {@link MetaEditor}.
|
||||
|
@ -63,10 +68,12 @@ public class TestMetaReaderEditor {
|
|||
public boolean isAborted() {
|
||||
return abort.get();
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
@BeforeClass public static void beforeClass() throws Exception {
|
||||
((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL);
|
||||
((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL);
|
||||
((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
|
||||
UTIL.startMiniCluster(3);
|
||||
|
||||
Configuration c = new Configuration(UTIL.getConfiguration());
|
||||
|
|
|
@ -4306,6 +4306,8 @@ public class TestFromClientSide {
|
|||
fail("Should have thrown IllegalArgumentException");
|
||||
} catch (IllegalArgumentException iax) {
|
||||
// success
|
||||
} catch (NullPointerException npe) {
|
||||
// success
|
||||
}
|
||||
// try null family
|
||||
try {
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.client;
|
|||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
@ -388,20 +389,19 @@ public class TestFromClientSide3 {
|
|||
table.put(put);
|
||||
table.flushCommits();
|
||||
|
||||
//Try getting the row with an empty row key and make sure the other base cases work as well
|
||||
Result res = table.get(new Get(new byte[0]));
|
||||
assertTrue(res.isEmpty() == true);
|
||||
//Try getting the row with an empty row key
|
||||
Result res = null;
|
||||
try {
|
||||
res = table.get(new Get(new byte[0]));
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// Expected.
|
||||
}
|
||||
assertTrue(res == null);
|
||||
res = table.get(new Get(Bytes.toBytes("r1-not-exist")));
|
||||
assertTrue(res.isEmpty() == true);
|
||||
res = table.get(new Get(ROW_BYTES));
|
||||
assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
|
||||
|
||||
//Now actually put in a row with an empty row key
|
||||
put = new Put(new byte[0]);
|
||||
put.add(FAMILY, COL_QUAL, VAL_BYTES);
|
||||
table.put(put);
|
||||
table.flushCommits();
|
||||
res = table.get(new Get(new byte[0]));
|
||||
assertTrue(Arrays.equals(res.getValue(FAMILY, COL_QUAL), VAL_BYTES));
|
||||
table.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,11 +19,12 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import junit.framework.Assert;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
|
@ -83,10 +84,12 @@ public class TestHTableMultiplexer {
|
|||
|
||||
// SinglePut case
|
||||
for (int i = 0; i < NUM_REGIONS; i++) {
|
||||
Put put = new Put(startRows[i]);
|
||||
byte [] row = startRows[i];
|
||||
if (row == null || row.length <= 0) continue;
|
||||
Put put = new Put(row);
|
||||
put.add(FAMILY, QUALIFIER, VALUE1);
|
||||
success = multiplexer.put(TABLE, put);
|
||||
Assert.assertTrue(success);
|
||||
assertTrue(success);
|
||||
|
||||
// ensure the buffer has been flushed
|
||||
verifyAllBufferedPutsHaveFlushed(status);
|
||||
|
@ -99,32 +102,35 @@ public class TestHTableMultiplexer {
|
|||
do {
|
||||
r = ht.get(get);
|
||||
} while (r == null || r.getValue(FAMILY, QUALIFIER) == null);
|
||||
Assert.assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER)));
|
||||
assertEquals(0, Bytes.compareTo(VALUE1, r.getValue(FAMILY, QUALIFIER)));
|
||||
}
|
||||
|
||||
// MultiPut case
|
||||
List<Put> multiput = new ArrayList<Put>();
|
||||
for (int i = 0; i < NUM_REGIONS; i++) {
|
||||
Put put = new Put(endRows[i]);
|
||||
byte [] row = endRows[i];
|
||||
if (row == null || row.length <= 0) continue;
|
||||
Put put = new Put(row);
|
||||
put.add(FAMILY, QUALIFIER, VALUE2);
|
||||
multiput.add(put);
|
||||
}
|
||||
failedPuts = multiplexer.put(TABLE, multiput);
|
||||
Assert.assertTrue(failedPuts == null);
|
||||
assertTrue(failedPuts == null);
|
||||
|
||||
// ensure the buffer has been flushed
|
||||
verifyAllBufferedPutsHaveFlushed(status);
|
||||
|
||||
// verify that the Get returns the correct result
|
||||
for (int i = 0; i < NUM_REGIONS; i++) {
|
||||
Get get = new Get(endRows[i]);
|
||||
byte [] row = endRows[i];
|
||||
if (row == null || row.length <= 0) continue;
|
||||
Get get = new Get(row);
|
||||
get.addColumn(FAMILY, QUALIFIER);
|
||||
Result r;
|
||||
do {
|
||||
r = ht.get(get);
|
||||
} while (r == null || r.getValue(FAMILY, QUALIFIER) == null);
|
||||
Assert.assertEquals(0,
|
||||
Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER)));
|
||||
assertEquals(0, Bytes.compareTo(VALUE2, r.getValue(FAMILY, QUALIFIER)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -141,7 +147,7 @@ public class TestHTableMultiplexer {
|
|||
}
|
||||
} while (status.getTotalBufferedCounter() != 0 && tries != retries);
|
||||
|
||||
Assert.assertEquals("There are still some buffered puts left in the queue",
|
||||
assertEquals("There are still some buffered puts left in the queue",
|
||||
0, status.getTotalBufferedCounter());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,149 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import java.util.ConcurrentModificationException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that I can Iterate Client Actions that hold Cells (Get does not have Cells).
+ */
+@Category(SmallTests.class)
+public class TestPutDeleteEtcCellIteration {
+  private static final byte [] ROW = new byte [] {'r'};
+  private static final long TIMESTAMP = System.currentTimeMillis();
+  private static final int COUNT = 10;
+
+  @Test
+  public void testPutIteration() {
+    Put p = new Put(ROW);
+    for (int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      p.add(bytes, bytes, TIMESTAMP, bytes);
+    }
+    int index = 0;
+    for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] bytes = Bytes.toBytes(index++);
+      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+    }
+    assertEquals(COUNT, index);
+  }
+
+  @Test (expected = ConcurrentModificationException.class)
+  public void testPutConcurrentModificationOnIteration() {
+    Put p = new Put(ROW);
+    for (int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      p.add(bytes, bytes, TIMESTAMP, bytes);
+    }
+    int index = 0;
+    int trigger = 3;
+    for (CellScanner cellScanner = p.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] bytes = Bytes.toBytes(index++);
+      // When we hit the trigger, try inserting a new KV; should trigger exception
+      if (index == trigger) p.add(bytes, bytes, TIMESTAMP, bytes);
+      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+    }
+    assertEquals(COUNT, index);
+  }
+
+  @Test
+  public void testDeleteIteration() {
+    Delete d = new Delete(ROW);
+    for (int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      d.deleteColumn(bytes, bytes, TIMESTAMP);
+    }
+    int index = 0;
+    for (CellScanner cellScanner = d.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] bytes = Bytes.toBytes(index++);
+      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, KeyValue.Type.DeleteColumn));
+    }
+    assertEquals(COUNT, index);
+  }
+
+  @Test
+  public void testAppendIteration() {
+    Append a = new Append(ROW);
+    for (int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      a.add(bytes, bytes, bytes);
+    }
+    int index = 0;
+    for (CellScanner cellScanner = a.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] bytes = Bytes.toBytes(index++);
+      KeyValue kv = (KeyValue)cell;
+      assertTrue(Bytes.equals(kv.getFamily(), bytes));
+      assertTrue(Bytes.equals(kv.getValue(), bytes));
+    }
+    assertEquals(COUNT, index);
+  }
+
+  @Test
+  public void testIncrementIteration() {
+    Increment increment = new Increment(ROW);
+    for (int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      increment.addColumn(bytes, bytes, i);
+    }
+    int index = 0;
+    for (CellScanner cellScanner = increment.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      int value = index;
+      byte [] bytes = Bytes.toBytes(index++);
+      KeyValue kv = (KeyValue)cell;
+      assertTrue(Bytes.equals(kv.getFamily(), bytes));
+      long a = Bytes.toLong(kv.getValue());
+      assertEquals(value, a);
+    }
+    assertEquals(COUNT, index);
+  }
+
+  @Test
+  public void testResultIteration() {
+    Cell [] cells = new Cell[COUNT];
+    for(int i = 0; i < COUNT; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      cells[i] = new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes);
+    }
+    Result r = new Result(Arrays.asList(cells));
+    int index = 0;
+    for (CellScanner cellScanner = r.cellScanner(); cellScanner.advance();) {
+      Cell cell = cellScanner.current();
+      byte [] bytes = Bytes.toBytes(index++);
+      cell.equals(new KeyValue(ROW, bytes, bytes, TIMESTAMP, bytes));
+    }
+    assertEquals(COUNT, index);
+  }
+}
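For context on the new test above: Put, Delete, Append, Increment and Result expose their Cells through CellScanner. A minimal usage sketch, assuming the same client API the test exercises; the class name and printed output are illustrative only:

import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CellScannerUsageSketch {
  public static void main(String[] args) throws Exception {
    Put p = new Put(Bytes.toBytes("row"));
    p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // advance() must be called before current(); it returns false once the Cells are exhausted.
    for (CellScanner scanner = p.cellScanner(); scanner.advance();) {
      KeyValue kv = (KeyValue) scanner.current();  // the test above casts the same way
      System.out.println(Bytes.toString(kv.getFamily()) + ":" + Bytes.toString(kv.getQualifier()));
    }
  }
}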
@@ -318,7 +318,12 @@ public class TestCoprocessorInterface extends HBaseTestCase {
     // now have all Environments fail
     for (int i = 0; i < regions.length; i++) {
       try {
-        Get g = new Get(regions[i].getStartKey());
+        byte [] r = regions[i].getStartKey();
+        if (r == null || r.length <= 0) {
+          // It's the start row. Can't ask for null. Ask for minimal key instead.
+          r = new byte [] {0};
+        }
+        Get g = new Get(r);
         regions[i].get(g);
         fail();
       } catch (org.apache.hadoop.hbase.exceptions.DoNotRetryIOException xc) {
@@ -342,7 +347,8 @@ public class TestCoprocessorInterface extends HBaseTestCase {
         findCoprocessor(CoprocessorII.class.getName());
     // new map and object created, hence the reference is different
     // hence the old entry was indeed removed by the GC and new one has been created
-    assertFalse(((CoprocessorII)c2).getSharedData().get("test2") == o2);
+    Object o3 = ((CoprocessorII)c2).getSharedData().get("test2");
+    assertFalse(o3 == o2);
   }

   public void testCoprocessorInterface() throws IOException {
@@ -20,10 +20,12 @@
 package org.apache.hadoop.hbase.io;

 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
-import java.util.TreeMap;
 import java.util.Map;
+import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -44,14 +46,11 @@ import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.MemStore;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.regionserver.MemStore;
 import org.apache.hadoop.hbase.util.ClassSize;
-import org.junit.experimental.categories.Category;
 import org.junit.BeforeClass;
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
+import org.junit.experimental.categories.Category;

 /**
  * Testing the sizing that HeapSize offers and compares to the size given by
@@ -252,10 +251,10 @@ public class TestHeapSize extends TestCase {
     cl = Put.class;
     expected = ClassSize.estimateBase(cl, false);
     //The actual TreeMap is not included in the above calculation
-    expected += ClassSize.TREEMAP;
-    Put put = new Put(Bytes.toBytes(""));
+    expected += ClassSize.align(ClassSize.TREEMAP + ClassSize.REFERENCE);
+    Put put = new Put(new byte [] {'0'});
     actual = put.heapSize();
-    if(expected != actual) {
+    if (expected != actual) {
       ClassSize.estimateBase(cl, true);
       assertEquals(expected, actual);
     }
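The revised expectation above charges a Put for the TreeMap backing its family map plus the reference to it, rounded up to 8-byte alignment. A small sketch of that rounding, using illustrative sizes rather than the real ClassSize constants:

public class AlignSketch {
  // Hypothetical restatement of ClassSize.align: round a size up to an 8-byte boundary.
  static long align(long num) {
    return ((num + 7) / 8) * 8;
  }

  public static void main(String[] args) {
    long treeMap = 80;    // illustrative stand-in for ClassSize.TREEMAP
    long reference = 8;   // illustrative stand-in for ClassSize.REFERENCE
    System.out.println(align(treeMap + reference));  // prints: 88
  }
}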
@@ -477,7 +477,10 @@ public class TestEndToEndSplitTransaction {
     HTable table = new HTable(conf, hri.getTableName());

     try {
-      Get get = new Get(hri.getStartKey());
+      byte [] row = hri.getStartKey();
+      // Check for null/empty row. If we find one, use a key that is likely to be in first region.
+      if (row == null || row.length <= 0) row = new byte [] {'0'};
+      Get get = new Get(row);
       while (System.currentTimeMillis() - start < timeout) {
         try {
           table.get(get);
@@ -164,8 +164,9 @@ public class TestMemStore extends TestCase {
   /**
    * A simple test which verifies the 3 possible states when scanning across snapshot.
    * @throws IOException
+   * @throws CloneNotSupportedException
    */
-  public void testScanAcrossSnapshot2() throws IOException {
+  public void testScanAcrossSnapshot2() throws IOException, CloneNotSupportedException {
     // we are going to the scanning across snapshot with two kvs
     // kv1 should always be returned before kv2
     final byte[] one = Bytes.toBytes(1);
@@ -32,7 +32,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

-import java.io.IOException;

 @Category(MediumTests.class)
 public class TestRegionServerMetrics {