HBASE-1561 HTable Mismatch between javadoc and what it actually does

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@787318 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-06-22 17:22:52 +00:00
parent f8046be708
commit 2bf31afa70
6 changed files with 41 additions and 14 deletions


@@ -211,6 +211,7 @@ Release 0.20.0 - Unreleased
HBASE-1545 atomicIncrements creating new values with Long.MAX_VALUE
HBASE-1547 atomicIncrement doesnt increase hregion.memcacheSize
HBASE-1553 ClassSize missing in trunk
HBASE-1561 HTable Mismatch between javadoc and what it actually does
IMPROVEMENTS
HBASE-1089 Add count of regions on filesystem to master UI; add percentage


@@ -87,13 +87,16 @@ public class Delete implements Writable {
/**
* Create a Delete operation for the specified row and timestamp, using
* an optional row lock.
* <p>
* an optional row lock.<p>
*
* If no further operations are done, this will delete all columns in all
* families of the specified row with a timestamp less than or equal to the
* specified timestamp.
* specified timestamp.<p>
*
* This timestamp is ONLY used for a delete row operation. If specifying
* families or columns, you must specify each timestamp individually.
* @param row row key
* @param timestamp maximum version timestamp
* @param timestamp maximum version timestamp (only for delete row)
* @param rowLock previously acquired row lock, or null
*/
public Delete(byte [] row, long timestamp, RowLock rowLock) {
@@ -169,6 +172,18 @@ public class Delete implements Writable {
familyMap.put(family, list);
}
/**
* Delete all versions of the specified column, given in
* <code>family:qualifier</code> notation, and with a timestamp less than
* or equal to the specified timestamp.
* @param column colon-delimited family and qualifier
* @param timestamp maximum version timestamp
*/
public void deleteColumns(byte [] column, long timestamp) {
byte [][] parts = KeyValue.parseColumn(column);
this.deleteColumns(parts[0], parts[1], timestamp);
}
/**
* Delete the latest version of the specified column.
* This is an expensive call in that on the server-side, it first does a

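To make the new Delete API concrete, here is a minimal client-side sketch of the colon-delimited deleteColumns(byte [] column, long timestamp) overload added above. The table name, row key, column, and timestamp literal are illustrative assumptions, not part of this patch.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteColumnsSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table and column names, used only for this sketch.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");
    Delete delete = new Delete(Bytes.toBytes("row1"));
    // Delete every version of info:name with a timestamp <= the given value,
    // using the colon-delimited overload added in this patch.
    delete.deleteColumns(Bytes.toBytes("info:name"), 1245680000000L);
    table.delete(delete);
  }
}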

@@ -57,7 +57,7 @@ import org.apache.hadoop.io.Writable;
* To limit the number of versions of each column to be returned, execute
* {@link #setMaxVersions(int) setMaxVersions}.
* <p>
* To add a filter, execute {@link #setFilter(RowFilterInterface) setFilter}.
* To add a filter, execute {@link #setFilter(Filter) setFilter}.
*/
public class Get implements Writable {
private byte [] row = null;
@@ -402,4 +402,4 @@ public class Get implements Writable {
}
}
}
}
}
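As a usage sketch of the Get behaviour this javadoc describes, setting setMaxVersions alongside the corrected setFilter(Filter) reference; the table, family, and qualifier names are assumptions for illustration only.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table, family, and qualifier names for this sketch only.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");
    Get get = new Get(Bytes.toBytes("row1"));
    get.addColumn(Bytes.toBytes("info"), Bytes.toBytes("name"));
    // Ask for up to three versions of the column instead of just the latest;
    // a Filter implementation could also be attached via setFilter(Filter).
    get.setMaxVersions(3);
    Result result = table.get(get);
    System.out.println(result);
  }
}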


@@ -1411,7 +1411,9 @@ public class HTable {
final RowLock rl)
throws IOException {
Delete d = new Delete(row, ts, rl);
d.deleteColumn(column);
if(column != null) {
d.deleteColumns(column, ts);
}
delete(d);
}
@@ -1544,9 +1546,8 @@ public class HTable {
public void deleteFamily(final byte [] row, final byte [] family,
final long timestamp, final RowLock rl)
throws IOException {
// Is this right? LATEST_TS? St.Ack
Delete d = new Delete(row, HConstants.LATEST_TIMESTAMP, rl);
d.deleteFamily(family);
d.deleteFamily(stripColon(family), timestamp);
delete(d);
}
@@ -2071,4 +2072,14 @@ public class HTable {
};
}
}
private static byte [] stripColon(final byte [] n) {
byte col = n[n.length-1];
if (col == ':') {
byte [] res = new byte[n.length-1];
System.arraycopy(n, 0, res, 0, n.length-1);
return res;
}
return n;
}
}
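A hedged sketch of calling the patched deleteFamily: with stripColon in place, a family passed with or without a trailing ':' resolves to the same family, and the caller's timestamp now bounds the family delete instead of LATEST_TIMESTAMP. The table, row, family, and timestamp values below are assumptions for illustration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteFamilySketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical table, row, and family names for this sketch only.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");
    long timestamp = 1245680000000L;
    // After this patch the two calls below are equivalent: the trailing ':'
    // is stripped, and cells in family 'info' with timestamps <= the given
    // value are deleted (no row lock is held, so null is passed).
    table.deleteFamily(Bytes.toBytes("row1"), Bytes.toBytes("info"), timestamp, null);
    table.deleteFamily(Bytes.toBytes("row1"), Bytes.toBytes("info:"), timestamp, null);
  }
}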


@@ -93,4 +93,4 @@ public class RowWhileMatchFilter implements Filter {
throw new RuntimeException("Failed deserialize.", e);
}
}
}
}


@@ -20,11 +20,11 @@
/**Provides row-level filters applied to HRegion scan results during calls to
* {@link org.apache.hadoop.hbase.client.ResultScanner#next()}.
<p>Since HBase 0.20.0, {@link Filter} is the new Interface used for filtering.
It replaces the deprecated {@link RowFilterInterface}.
<p>Since HBase 0.20.0, {@link org.apache.hadoop.hbase.filter.Filter} is the new Interface used for filtering.
It replaces the deprecated {@link org.apache.hadoop.hbase.filter.RowFilterInterface}.
Filters run the extent of a table unless you wrap your filter in a
{@link RowWhileMatchFilter}. The latter returns as soon as the filter
stops matching.
{@link org.apache.hadoop.hbase.filter.RowWhileMatchFilter}.
The latter returns as soon as the filter stops matching.
</p>
*/
package org.apache.hadoop.hbase.filter;
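Finally, a hedged sketch of the wrapping pattern this package javadoc recommends: an arbitrary Filter is wrapped in RowWhileMatchFilter so the scan ends as soon as that filter stops matching, rather than running the extent of the table. The scan wiring and the Filter-taking constructor on the wrapper are assumptions for illustration, not part of this patch.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.RowWhileMatchFilter;

public class WhileMatchScanSketch {
  // Scans 'tableName', wrapping 'filter' so the scan stops at the first row
  // the wrapped filter rejects rather than scanning the whole table.
  static void scanWhileMatching(String tableName, Filter filter) throws IOException {
    HTable table = new HTable(new HBaseConfiguration(), tableName);
    Scan scan = new Scan();
    scan.setFilter(new RowWhileMatchFilter(filter));  // assumed Filter-taking constructor
    ResultScanner scanner = table.getScanner(scan);
    try {
      for (Result result : scanner) {
        System.out.println(result);
      }
    } finally {
      scanner.close();
    }
  }
}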