HBASE-2044 HBASE-1822 removed not-deprecated APIs

HBASE-2041  Change WAL default configuration values
            changes to the shell and a fix for an interrupt bug


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@891073 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2009-12-15 23:41:40 +00:00
parent 83f441fb18
commit 04155cac92
9 changed files with 192 additions and 21 deletions


@ -128,6 +128,7 @@ Release 0.21.0 - Unreleased
documentation doesn't work (Benoit Sigoure via JD)
HBASE-2048 Small inconsistency in the "Example API Usage"
(Benoit Sigoure via JD)
HBASE-2044 HBASE-1822 removed not-deprecated APIs
IMPROVEMENTS
HBASE-1760 Cleanup TODOs in HTable


@ -222,13 +222,18 @@ module HBase
if method == "delete"
@admin.deleteColumn(tableName, args[NAME])
elsif method == "table_att"
- args[MAX_FILESIZE]? htd.setMaxFileSize(JLong.valueOf(args[MAX_FILESIZE])) :
-     htd.setMaxFileSize(HTableDescriptor::DEFAULT_MAX_FILESIZE);
- args[READONLY]? htd.setReadOnly(JBoolean.valueOf(args[READONLY])) :
-     htd.setReadOnly(HTableDescriptor::DEFAULT_READONLY);
- args[MEMSTORE_FLUSHSIZE]?
-     htd.setMemStoreFlushSize(JLong.valueOf(args[MEMSTORE_FLUSHSIZE])) :
-     htd.setMemStoreFlushSize(HTableDescriptor::DEFAULT_MEMSTORE_FLUSH_SIZE);
+ if args[MAX_FILESIZE]
+   htd.setMaxFileSize(JLong.valueOf(args[MAX_FILESIZE]))
+ end
+ if args[READONLY]
+   htd.setReadOnly(JBoolean.valueOf(args[READONLY]))
+ end
+ if args[MEMSTORE_FLUSHSIZE]
+   htd.setMemStoreFlushSize(JLong.valueOf(args[MEMSTORE_FLUSHSIZE]))
+ end
+ if args[DEFERRED_LOG_FLUSH]
+   htd.setDeferredLogFlush(JBoolean.valueOf(args[DEFERRED_LOG_FLUSH]))
+ end
@admin.modifyTable(tableName.to_java_bytes, htd)
else
descriptor = hcd(args)


@ -176,7 +176,7 @@ HBASE SHELL COMMANDS:
hbase> alter 't1', {NAME => 'f1', METHOD => 'delete'}
You can also change table-scope attributes like MAX_FILESIZE
- MEMSTORE_FLUSHSIZE and READONLY.
+ MEMSTORE_FLUSHSIZE, READONLY, and DEFERRED_LOG_FLUSH.
For example, to change the max size of a family to 128MB, do:
hbase> alter 't1', {METHOD => 'table_att', MAX_FILESIZE => '134217728'}
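
The new DEFERRED_LOG_FLUSH attribute should be settable the same way; an
illustrative invocation (not taken from this commit), assuming the shell
passes it through as a string like the other table_att values:
hbase> alter 't1', {METHOD => 'table_att', DEFERRED_LOG_FLUSH => 'true'}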


@ -343,4 +343,32 @@ public class Delete implements Writable, Row, Comparable<Row> {
}
}
}
/**
* Delete all versions of the specified column, given in
* <code>family:qualifier</code> notation, and with a timestamp less than
* or equal to the specified timestamp.
* @param column colon-delimited family and qualifier
* @param timestamp maximum version timestamp
* @deprecated use {@link #deleteColumns(byte[], byte[], long)} instead
*/
public Delete deleteColumns(byte [] column, long timestamp) {
byte [][] parts = KeyValue.parseColumn(column);
this.deleteColumns(parts[0], parts[1], timestamp);
return this;
}
/**
* Delete the latest version of the specified column, given in
* <code>family:qualifier</code> notation.
* @param column colon-delimited family and qualifier
* @deprecated use {@link #deleteColumn(byte[], byte[])} instead
*/
public Delete deleteColumn(byte [] column) {
byte [][] parts = KeyValue.parseColumn(column);
this.deleteColumn(parts[0], parts[1], HConstants.LATEST_TIMESTAMP);
return this;
}
}
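
For context, a minimal usage sketch of the restored overload (row key,
column, and timestamp below are made-up values); it simply splits the old
colon-delimited column and delegates to the family-and-qualifier form:

  // hypothetical row and column names, arbitrary timestamp
  Delete d = new Delete(Bytes.toBytes("row1"));
  d.deleteColumns(Bytes.toBytes("cf:col"), 1260000000000L); // deprecated, colon-delimited
  // non-deprecated equivalent the call delegates to:
  d.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("col"), 1260000000000L);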


@ -131,7 +131,7 @@ public class Get implements Writable {
familyMap.put(family, set);
return this;
}
/**
* Get versions of columns only within the specified timestamp range,
* [minStamp, maxStamp).
@ -397,4 +397,38 @@ public class Get implements Writable {
throw new RuntimeException("Can't find class " + className);
}
}
/**
* Adds an array of columns specified in the old format, family:qualifier.
* <p>
* Overrides previous calls to addFamily for any families in the input.
* @param columns array of columns, formatted as <pre>family:qualifier</pre>
* @deprecated issue multiple calls to {@link #addColumn(byte[], byte[])} instead
*/
public Get addColumns(byte [][] columns) {
if (columns == null) return this;
for(int i = 0; i < columns.length; i++) {
try {
addColumn(columns[i]);
} catch(Exception e) {}
}
return this;
}
/**
*
* @param column Old format column.
* @return This.
* @deprecated use {@link #addColumn(byte[], byte[])} instead
*/
public Get addColumn(final byte [] column) {
if (column == null) return this;
byte [][] split = KeyValue.parseColumn(column);
if (split.length > 1 && split[1] != null && split[1].length > 0) {
addColumn(split[0], split[1]);
} else {
addFamily(split[0]);
}
return this;
}
}
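
A brief sketch of the restored Get overloads (row, family, and qualifier
names are hypothetical); note that a trailing colon with no qualifier falls
back to addFamily, per the code above:

  // hypothetical names; Bytes is org.apache.hadoop.hbase.util.Bytes
  Get g = new Get(Bytes.toBytes("row1"));
  g.addColumns(new byte [][] { Bytes.toBytes("cf:a"), Bytes.toBytes("cf:b") }); // deprecated
  g.addColumn(Bytes.toBytes("cf:"));  // no qualifier, so the whole family is requested
  // non-deprecated equivalent of the first column:
  g.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"));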


@ -519,4 +519,18 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
}
}
}
/**
* Add the specified column and value, with the specified timestamp as
* its version, to this Put operation.
* @param column Old style column name with family and qualifier put together
* with a colon.
* @param ts version timestamp
* @param value column value
* @deprecated use {@link #add(byte[], byte[], long, byte[])} instead
*/
public Put add(byte [] column, long ts, byte [] value) {
byte [][] parts = KeyValue.parseColumn(column);
return add(parts[0], parts[1], ts, value);
}
}
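
A minimal sketch of the restored Put overload (row, column, timestamp, and
value are made-up); it parses the colon-delimited column and delegates to
the family-and-qualifier form:

  // hypothetical names, arbitrary timestamp
  Put p = new Put(Bytes.toBytes("row1"));
  p.add(Bytes.toBytes("cf:col"), 1260000000000L, Bytes.toBytes("v")); // deprecated
  // non-deprecated equivalent:
  p.add(Bytes.toBytes("cf"), Bytes.toBytes("col"), 1260000000000L, Bytes.toBytes("v"));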


@ -30,6 +30,7 @@ import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
@ -529,4 +530,91 @@ public class Scan implements Writable {
}
}
}
/**
* Parses a combined family and qualifier and adds either both or just the
* family in case there is no qualifier. This assumes the older colon-delimited
* notation, e.g. "data:contents" or "meta:".
* <p>
* Note: It will throw an error when the colon is missing.
*
* @param familyAndQualifier
* @return A reference to this instance.
* @throws IllegalArgumentException When the colon is missing.
* @deprecated use {@link #addColumn(byte[], byte[])} instead
*/
public Scan addColumn(byte[] familyAndQualifier) {
byte [][] fq = KeyValue.parseColumn(familyAndQualifier);
if (fq.length > 1 && fq[1] != null && fq[1].length > 0) {
addColumn(fq[0], fq[1]);
} else {
addFamily(fq[0]);
}
return this;
}
/**
* Adds an array of columns specified using the old format, family:qualifier.
* <p>
* Overrides previous calls to addFamily for any families in the input.
*
* @param columns array of columns, formatted as <pre>family:qualifier</pre>
* @deprecated issue multiple calls to {@link #addColumn(byte[], byte[])} instead
*/
public Scan addColumns(byte [][] columns) {
for (int i = 0; i < columns.length; i++) {
addColumn(columns[i]);
}
return this;
}
/**
* Convenience method to help parse old-style column definitions (typically
* user input on the command line), e.g. "data:contents mime:". The columns
* must be space delimited and always include a colon (":") to denote family
* and qualifier.
*
* @param columns The columns to parse.
* @return A reference to this instance.
* @deprecated use {@link #addColumn(byte[], byte[])} instead
*/
public Scan addColumns(String columns) {
String[] cols = columns.split(" ");
for (String col : cols) {
addColumn(Bytes.toBytes(col));
}
return this;
}
/**
* Helps to convert the binary column families and qualifiers to a text
* representation, e.g. "data:mimetype data:contents meta:". Binary values
* are properly encoded using {@link Bytes#toStringBinary(byte[])}.
*
* @return The columns in an old style string format.
* @deprecated
*/
public String getInputColumns() {
String cols = "";
for (Map.Entry<byte[], NavigableSet<byte[]>> e :
familyMap.entrySet()) {
byte[] fam = e.getKey();
if (cols.length() > 0) cols += " ";
NavigableSet<byte[]> quals = e.getValue();
// check if this family has qualifiers
if (quals != null && quals.size() > 0) {
String cs = "";
for (byte[] qual : quals) {
if (cs.length() > 0) cs += " ";
// encode values to make parsing easier later
cs += Bytes.toStringBinary(fam) + ":" + Bytes.toStringBinary(qual);
}
cols += cs;
} else {
// only add the family but with old style delimiter
cols += Bytes.toStringBinary(fam) + ":";
}
}
return cols;
}
}
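
A short sketch of the restored Scan helpers (family and qualifier names are
hypothetical); addColumns(String) splits on spaces, and getInputColumns
renders the family map back into the old space-delimited format:

  // hypothetical names
  Scan s = new Scan();
  s.addColumns("data:contents meta:");   // deprecated, old space-delimited format
  String cols = s.getInputColumns();     // expected to yield "data:contents meta:"
  // non-deprecated equivalent:
  s.addColumn(Bytes.toBytes("data"), Bytes.toBytes("contents"));
  s.addFamily(Bytes.toBytes("meta"));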


@ -735,23 +735,24 @@ public class HLog implements HConstants, Syncable {
public void run() {
try {
lock.lock();
- while(!closed) {
+ // awaiting with a timeout doesn't always
+ // throw exceptions on interrupt
+ while(!this.isInterrupted()) {
- // Wait until something has to be synced or do it if we waited enough
- // time (useful if something appends but does not sync).
- if (!queueEmpty.await(this.optionalFlushInterval,
-     TimeUnit.MILLISECONDS)) {
+ // Wait until something has to be hflushed or do it if we waited
+ // enough time (useful if something appends but does not hflush).
+ // 0 or less means that it timed out and maybe waited a bit more.
+ if (!(queueEmpty.awaitNanos(
+     this.optionalFlushInterval*1000000) <= 0)) {
forceSync = true;
}
- // We got the signal, let's syncFS. We currently own the lock so new
+ // We got the signal, let's hflush. We currently own the lock so new
// writes are waiting to acquire it in addToSyncQueue while the ones
- // we sync are waiting on await()
+ // we hflush are waiting on await()
hflush();
- // Release all the clients waiting on the sync. Notice that we still
+ // Release all the clients waiting on the hflush. Notice that we still
// own the lock until we get back to await at which point all the
// other threads waiting will first acquire and release locks
syncDone.signalAll();
@ -774,7 +775,7 @@ public class HLog implements HConstants, Syncable {
*/
public void addToSyncQueue(boolean force) {
- // Don't bother if somehow our append was already synced
+ // Don't bother if somehow our append was already hflushed
if (unflushedEntries.get() == 0) {
return;
}
@ -786,7 +787,7 @@ public class HLog implements HConstants, Syncable {
// Wake the thread
queueEmpty.signal();
- // Wait for it to syncFs
+ // Wait for it to hflush
syncDone.await();
} catch (InterruptedException e) {
LOG.debug(getName() + " was interrupted while waiting for sync", e);
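
The loop change above leans on the Condition.awaitNanos contract: it returns
an estimate of the remaining wait time, and a value of zero or less means the
timeout elapsed before a signal arrived. A minimal, self-contained sketch of
that pattern (not HBase code; class, field, and method names are made up):

  import java.util.concurrent.TimeUnit;
  import java.util.concurrent.locks.Condition;
  import java.util.concurrent.locks.ReentrantLock;

  // Illustrates the awaitNanos contract the new LogSyncer loop relies on.
  public class TimedWaitSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition workQueued = lock.newCondition();

    public void waitForWork(long flushIntervalMs) throws InterruptedException {
      lock.lock();
      try {
        long remaining = workQueued.awaitNanos(
            TimeUnit.MILLISECONDS.toNanos(flushIntervalMs));
        if (remaining <= 0) {
          // timed out with nothing queued: do the periodic forced flush
        } else {
          // signalled before the timeout: an append queued work for us
        }
      } finally {
        lock.unlock();
      }
    }
  }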