HBASE-601 Just remove deprecated methods in HTable; 0.2 is not backward compatible anyways

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@652054 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2008-04-28 04:58:09 +00:00
parent 495467870b
commit 21da84cfea
5 changed files with 29 additions and 337 deletions

View File

@ -3,6 +3,8 @@ Hbase Change Log
INCOMPATIBLE CHANGES
HBASE-584 Names in the filter interface are confusing (Clint Morgan via
Jim Kellerman) (API change for filters)
HBASE-601 Just remove deprecated methods in HTable; 0.2 is not backward
compatible anyways
BUG FIXES
HBASE-574 HBase does not load hadoop native libs (Rong-En Fan via Stack)

View File

@ -25,34 +25,27 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.filter.RowFilterInterface;
import org.apache.hadoop.hbase.filter.StopRowFilter;
import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
/**
* Used to communicate with a single HBase table
@ -216,9 +209,8 @@ public class HTable implements HConstants {
if (info.isOffline() || info.isSplit()) {
continue;
} else {
keyList.add(info.getStartKey());
}
keyList.add(info.getStartKey());
}
// close that remote scanner
@ -508,106 +500,6 @@ public class HTable implements HConstants {
return new ClientScanner(columns, startRow, timestamp, filter);
}
/**
 * Start an atomic row insertion/update. No changes are committed until the
 * call to commit() returns. A call to abort() will abandon any updates in
 * progress.
 *
 * <p>
 * Example:
 * <br>
 * <pre><span style="font-family: monospace;">
 * long lockid = table.startUpdate(new Text(article.getName()));
 * for (File articleInfo: article.listFiles(new NonDirectories())) {
 *   String article = null;
 *   try {
 *     DataInputStream in = new DataInputStream(new FileInputStream(articleInfo));
 *     article = in.readUTF();
 *   } catch (IOException e) {
 *     // Input error - abandon update
 *     table.abort(lockid);
 *     throw e;
 *   }
 *   try {
 *     table.put(lockid, columnName(articleInfo.getName()), article.getBytes());
 *   } catch (RuntimeException e) {
 *     // Put failed - abandon update
 *     table.abort(lockid);
 *     throw e;
 *   }
 * }
 * table.commit(lockid);
 * </span></pre>
 *
 * @param row Name of row to start update against. Note, choose row names
 * with care. Rows are sorted lexicographically (comparison is done
 * using {@link Text#compareTo(Object)}. If your keys are numeric,
 * lexicographic sorting means that 46 sorts AFTER 450 (If you want to use
 * numerics for keys, zero-pad).
 * @return Row lock id.
 * @see #commit(long)
 * @see #commit(long, long)
 * @see #abort(long)
 * @deprecated Build a {@link BatchUpdate} and commit it directly instead.
 */
@Deprecated
public synchronized long startUpdate(final Text row) {
// Throws IllegalStateException if an update is already in progress
// on this HTable instance.
updateInProgress(false);
batch.set(new BatchUpdate(row));
// Lock ids are not real locks: the single pending update always uses id 1,
// which put/delete/abort/commit validate against.
return 1;
}
/**
 * Update a value for the specified column in the update started with
 * {@link #startUpdate(Text)}.
 *
 * @param lockid lock id returned from startUpdate (always 1; anything else
 * is rejected)
 * @param column column whose value is being set
 * @param val new value for column. Cannot be null.
 * @throws IllegalArgumentException if the lock id is invalid or val is null
 * @deprecated Use {@link BatchUpdate} puts committed in one call instead.
 */
@Deprecated
public void put(long lockid, Text column, byte val[]) {
// startUpdate always hands out lock id 1; reject anything else.
if (lockid != 1) {
throw new IllegalArgumentException("Invalid lock id!");
}
if (val == null) {
throw new IllegalArgumentException("value cannot be null");
}
// Throws IllegalStateException if no update is in progress.
updateInProgress(true);
batch.get().put(column, val);
}
/**
 * Update a value for the specified column: Writable overload that
 * serializes {@code val} and delegates to {@link #put(long, Text, byte[])}.
 *
 * @param lockid lock id returned from startUpdate
 * @param column column whose value is being set
 * @param val new value for column. Cannot be null.
 * @throws IOException throws this if the writable can't be
 * converted into a byte array
 * @deprecated Use {@link BatchUpdate} puts committed in one call instead.
 */
@Deprecated
public void put(long lockid, Text column, Writable val) throws IOException {
put(lockid, column, Writables.getBytes(val));
}
/**
 * Delete the value for a column.
 * Deletes the cell whose row/column/commit-timestamp match those of the
 * delete.
 * @param lockid lock id returned from startUpdate (always 1; anything else
 * is rejected)
 * @param column name of column whose value is to be deleted
 * @throws IllegalArgumentException if the lock id is invalid
 * @deprecated Use {@link BatchUpdate} deletes committed in one call instead.
 */
@Deprecated
public void delete(long lockid, Text column) {
// startUpdate always hands out lock id 1; reject anything else.
if (lockid != 1) {
throw new IllegalArgumentException("Invalid lock id!");
}
// Throws IllegalStateException if no update is in progress.
updateInProgress(true);
batch.get().delete(column);
}
/**
* Delete all cells that match the passed row and column.
* @param row Row to update
@ -694,66 +586,7 @@ public class HTable implements HConstants {
/**
 * Delete all cells for the passed row and column family, delegating to
 * {@link #deleteFamily(Text, Text, long)} with
 * {@link HConstants#LATEST_TIMESTAMP}.
 * @param row row whose family cells are to be deleted
 * @param family name of the column family
 * @throws IOException if the delete fails
 */
public void deleteFamily(final Text row, final Text family) throws IOException{
deleteFamily(row, family, HConstants.LATEST_TIMESTAMP);
}
/**
 * Abort a row mutation.
 *
 * This method should be called only when an update has been started and it
 * is determined that the update should not be committed.
 *
 * Releases resources being held by the update in progress.
 *
 * @param lockid lock id returned from startUpdate (always 1; anything else
 * is rejected)
 * @throws IllegalArgumentException if the lock id is invalid
 * @deprecated No replacement needed once {@link BatchUpdate} is used:
 * simply discard the uncommitted BatchUpdate.
 */
@Deprecated
public synchronized void abort(long lockid) {
if (lockid != 1) {
throw new IllegalArgumentException("Invalid lock id!");
}
// Drop the pending BatchUpdate without sending anything to the server.
batch.set(null);
}
/**
 * Finalize a row mutation.
 *
 * When this method is used, we pass the server a value that says use
 * the 'latest' timestamp. If we are doing a put, on the server-side, cells
 * will be given the server's current timestamp. If we are committing
 * deletes, then delete removes the most recently modified cell of the
 * stipulated column.
 *
 * @see #commit(long, long)
 *
 * @param lockid lock id returned from startUpdate
 * @throws IOException
 * @deprecated Commit a {@link BatchUpdate} directly instead.
 */
@Deprecated
public void commit(long lockid) throws IOException {
// Delegates to the timestamped overload with the LATEST_TIMESTAMP sentinel.
commit(lockid, LATEST_TIMESTAMP);
}
/**
 * Finalize a row mutation and release any resources associated with the update.
 *
 * @param lockid lock id returned from startUpdate (always 1; anything else
 * is rejected)
 * @param timestamp time to associate with the change
 * @throws IOException if the server-side commit fails
 * @throws IllegalArgumentException if the lock id is invalid
 * @deprecated Commit a {@link BatchUpdate} directly instead.
 */
@Deprecated
public void commit(long lockid, final long timestamp)
throws IOException {
// Throws IllegalStateException if no update is in progress.
updateInProgress(true);
if (lockid != 1) {
throw new IllegalArgumentException("Invalid lock id!");
}
try {
batch.get().setTimestamp(timestamp);
commit(batch.get());
} finally {
// Always clear the pending update, even when the commit fails,
// so a new startUpdate can begin.
batch.set(null);
}
}
/**
* Commit a BatchUpdate to the table.
* @param batchUpdate
@ -778,7 +611,6 @@ public class HTable implements HConstants {
* through them all.
*/
protected class ClientScanner implements Scanner {
private final Text EMPTY_COLUMN = new Text();
private Text[] columns;
private Text startRow;
private long scanTime;
@ -949,9 +781,8 @@ public class HTable implements HConstants {
} catch (IOException e) {
throw new RuntimeException(e);
}
} else {
return true;
}
return true;
}
// get the pending next item and advance the iterator. returns null if
@ -1020,18 +851,16 @@ public class HTable implements HConstants {
" attempts.\n";
int i = 1;
for (IOException e2 : exceptions) {
message = message + "Exception " + i + ":\n" + e;
message = message + "Exception " + i++ + ":\n" + e2;
}
LOG.debug(message);
}
throw e;
} else {
if (LOG.isDebugEnabled()) {
exceptions.add(e);
LOG.debug("reloading table servers because: " + e.getMessage());
}
}
if (LOG.isDebugEnabled()) {
exceptions.add(e);
LOG.debug("reloading table servers because: " + e.getMessage());
}
} catch (Exception e) {
throw new RuntimeException(e);
}
@ -1043,12 +872,4 @@ public class HTable implements HConstants {
}
return null;
}
/**
 * Does nothing anymore; there are no client-side resources to release.
 * @deprecated this method is a no-op and may be removed
 */
@Deprecated
public void close() {
// do nothing...
}
}

View File

@ -424,14 +424,8 @@ public class ThriftServer {
}
table.commit(batchUpdate);
} catch (IOException e) {
if (lockid != null) {
table.abort(lockid);
}
throw new IOError(e.getMessage());
} catch (IllegalArgumentException e) {
if (lockid != null) {
table.abort(lockid);
}
throw new IllegalArgument(e.getMessage());
}
}

View File

@ -22,15 +22,15 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.io.Text;
/**
* Test batch updates
@ -68,34 +68,14 @@ public class TestBatchUpdate extends HBaseClusterTestCase {
* @throws IOException
*/
public void testBatchUpdate() throws IOException {
try {
table.commit(-1L);
} catch (IllegalStateException e) {
// expected
} catch (Exception e) {
e.printStackTrace();
fail();
}
BatchUpdate bu = new BatchUpdate(new Text("row1"));
bu.put(CONTENTS, value);
bu.delete(CONTENTS);
table.commit(bu);
long lockid = table.startUpdate(new Text("row1"));
try {
@SuppressWarnings("unused")
long dummy = table.startUpdate(new Text("row2"));
} catch (IllegalStateException e) {
// expected
} catch (Exception e) {
e.printStackTrace();
fail();
}
table.put(lockid, CONTENTS, value);
table.delete(lockid, CONTENTS);
table.commit(lockid);
lockid = table.startUpdate(new Text("row2"));
table.put(lockid, CONTENTS, value);
table.commit(lockid);
bu = new BatchUpdate(new Text("row2"));
bu.put(CONTENTS, value);
table.commit(bu);
Text[] columns = { CONTENTS };
Scanner scanner = table.getScanner(columns, new Text());

View File

@ -1,105 +0,0 @@
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
/**
 * Tests that the client protects against multiple concurrent row updates
 * and rejects bogus lock ids. This test case will be removed at the same
 * time that HTable.startUpdate and friends are removed.
 */
@Deprecated
public class TestMultipleUpdates extends HBaseClusterTestCase {
// Single column family used by every update in this test.
private static final String CONTENTS_STR = "contents:";
private static final Text CONTENTS = new Text(CONTENTS_STR);
// Arbitrary cell payload.
private static final byte[] value = { 1, 2, 3, 4 };
private HTableDescriptor desc = null;
private HTable table = null;
/**
 * {@inheritDoc}
 */
@Override
public void setUp() throws Exception {
super.setUp();
// Create a one-family table and open a client handle on it.
this.desc = new HTableDescriptor("test");
desc.addFamily(new HColumnDescriptor(CONTENTS_STR));
HBaseAdmin admin = new HBaseAdmin(conf);
admin.createTable(desc);
table = new HTable(conf, desc.getName());
}
/** the test */
public void testMultipleUpdates() {
try {
long lockid = table.startUpdate(new Text("row1"));
// A second startUpdate while one is outstanding must fail.
try {
long lockid2 = table.startUpdate(new Text("row2"));
throw new Exception("second startUpdate returned lock id " + lockid2);
} catch (IllegalStateException i) {
// expected
}
// Operations handed a bogus lock id must all be rejected.
long invalidid = 42;
try {
table.put(invalidid, CONTENTS, value);
} catch (IllegalArgumentException i) {
// expected
}
try {
table.delete(invalidid, CONTENTS);
} catch (IllegalArgumentException i) {
// expected
}
try {
table.abort(invalidid);
} catch (IllegalArgumentException i) {
// expected
}
try {
table.commit(invalidid);
} catch (IllegalArgumentException i) {
// expected
}
// Release the outstanding update so teardown is clean.
table.abort(lockid);
} catch (Exception e) {
System.err.println("unexpected exception");
e.printStackTrace();
fail();
}
}
}