diff --git a/CHANGES.txt b/CHANGES.txt
index 294de41d722..3569757a9d9 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -10,3 +10,5 @@ Trunk (unreleased changes)
java.util.concurrent.locks.ReentrantLock (Michael Stack)
5. HADOOP-1403. HBase reliability - make master and region server more fault
tolerant.
+ 6. HADOOP-1418. HBase miscellaneous: unit test for HClient, client to do
+ 'Performance Evaluation', etc.
diff --git a/src/java/org/apache/hadoop/hbase/HClient.java b/src/java/org/apache/hadoop/hbase/HClient.java
index f708432e2ae..f9e2b053147 100644
--- a/src/java/org/apache/hadoop/hbase/HClient.java
+++ b/src/java/org/apache/hadoop/hbase/HClient.java
@@ -32,9 +32,9 @@ import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
-/*******************************************************************************
+/**
* HClient manages a connection to a single HRegionServer.
- ******************************************************************************/
+ */
public class HClient implements HConstants {
private final Log LOG = LogFactory.getLog(this.getClass().getName());
@@ -44,7 +44,6 @@ public class HClient implements HConstants {
private static final Text EMPTY_START_ROW = new Text();
- private boolean closed;
private long clientTimeout;
private int numTimeouts;
private int numRetries;
@@ -82,7 +81,6 @@ public class HClient implements HConstants {
/** Creates a new HClient */
public HClient(Configuration conf) {
- this.closed = false;
this.conf = conf;
this.clientTimeout = conf.getLong("hbase.client.timeout.length", 30 * 1000);
@@ -101,15 +99,6 @@ public class HClient implements HConstants {
this.rand = new Random();
}
- /**
- * Check client is open.
- */
- private void checkOpen() {
- if (this.closed) {
- throw new IllegalStateException("client is not open");
- }
- }
-
/**
* Find the address of the master and connect to it
*/
@@ -119,7 +108,8 @@ public class HClient implements HConstants {
}
for(int tries = 0; this.master == null && tries < numRetries; tries++) {
HServerAddress masterLocation =
- new HServerAddress(this.conf.get(MASTER_ADDRESS));
+ new HServerAddress(this.conf.get(MASTER_ADDRESS,
+ DEFAULT_MASTER_ADDRESS));
try {
HMasterInterface tryMaster =
@@ -159,21 +149,18 @@ public class HClient implements HConstants {
throw new IllegalArgumentException(desc.getName().toString()
+ " is a reserved table name");
}
- checkOpen();
checkMaster();
locateRootRegion();
this.master.createTable(desc);
}
public synchronized void deleteTable(Text tableName) throws IOException {
- checkOpen();
checkMaster();
locateRootRegion();
this.master.deleteTable(tableName);
}
public synchronized void shutdown() throws IOException {
- checkOpen();
checkMaster();
this.master.shutdown();
}
@@ -182,7 +169,6 @@ public class HClient implements HConstants {
if(tableName == null || tableName.getLength() == 0) {
throw new IllegalArgumentException("table name cannot be null or zero length");
}
- checkOpen();
this.tableServers = tablesToServers.get(tableName);
if(this.tableServers == null ) { // We don't know where the table is
findTableInMeta(tableName); // Load the information from meta
@@ -322,7 +308,6 @@ public class HClient implements HConstants {
}
if (rootRegionLocation == null) {
- this.closed = true;
throw new IOException("unable to locate root region server");
}
}
@@ -455,14 +440,6 @@ public class HClient implements HConstants {
return server;
}
- /** Close the connection */
- public synchronized void close() throws IOException {
- if(! this.closed) {
- RPC.stopClient();
- this.closed = true;
- }
- }
-
/**
* List all the userspace tables. In other words, scan the META table.
*
@@ -927,7 +904,7 @@ public class HClient implements HConstants {
" createTable webcrawl contents: anchors: 10");
}
- int doCommandLine(final String args[]) {
+ public int doCommandLine(final String args[]) {
// Process command-line args. TODO: Better cmd-line processing
// (but hopefully something not as painful as cli options).
int errCode = -1;
diff --git a/src/java/org/apache/hadoop/hbase/HLocking.java b/src/java/org/apache/hadoop/hbase/HLocking.java
deleted file mode 100644
index 818f9906e18..00000000000
--- a/src/java/org/apache/hadoop/hbase/HLocking.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Copyright 2007 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-/*******************************************************************************
- * HLocking is a set of lock primitives that does not rely on a
- * particular thread holding the monitor for an object. This is
- * especially important when a lock must persist over multiple RPC's
- * since there is no guarantee that the same Server thread will handle
- * all the RPC's until the lock is released.
- *
- * For each independent entity that needs locking, create a new
- * HLocking instance.
- *
- ******************************************************************************/
-public class HLocking {
- private Integer mutex;
-
- // If lockers == 0, the lock is unlocked
- // If lockers > 0, locked for read
- // If lockers == -1 locked for write
-
- private AtomicInteger lockers;
-
- /** Constructor */
- public HLocking() {
- this.mutex = new Integer(0);
- this.lockers = new AtomicInteger(0);
- }
-
- /**
- * Caller needs the nonexclusive read-lock
- */
- public void obtainReadLock() {
- synchronized(mutex) {
- while(lockers.get() < 0) {
- try {
- mutex.wait();
- } catch(InterruptedException ie) {
- }
- }
- lockers.incrementAndGet();
- mutex.notifyAll();
- }
- }
-
- /**
- * Caller is finished with the nonexclusive read-lock
- */
- public void releaseReadLock() {
- synchronized(mutex) {
- if(lockers.decrementAndGet() < 0) {
- throw new IllegalStateException("lockers: " + lockers);
- }
- mutex.notifyAll();
- }
- }
-
- /**
- * Caller needs the exclusive write-lock
- */
- public void obtainWriteLock() {
- synchronized(mutex) {
- while(!lockers.compareAndSet(0, -1)) {
- try {
- mutex.wait();
- } catch (InterruptedException ie) {
- }
- }
- mutex.notifyAll();
- }
- }
-
- /**
- * Caller is finished with the write lock
- */
- public void releaseWriteLock() {
- synchronized(mutex) {
- if(!lockers.compareAndSet(-1, 0)) {
- throw new IllegalStateException("lockers: " + lockers);
- }
- mutex.notifyAll();
- }
- }
-}
diff --git a/src/test/org/apache/hadoop/hbase/EvaluationClient.java b/src/test/org/apache/hadoop/hbase/EvaluationClient.java
new file mode 100644
index 00000000000..94dbef0d501
--- /dev/null
+++ b/src/test/org/apache/hadoop/hbase/EvaluationClient.java
@@ -0,0 +1,414 @@
+/**
+ * Copyright 2006 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.Random;
+import java.util.TreeMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.log4j.Logger;
+
+/**
+ * Client used evaluating HBase performance and scalability. Steps through
+ * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test,
+ * a random writes test, etc.). Pass on the command-line which test to run,
+ * how many clients are participating in this experiment, and the row range
+ * this client instance is to operate on. Run
+ * <code>java EvaluationClient --help</code>
+ * to obtain usage.
+ *
+ *
+ * <p>This class implements the client used in the
+ * Performance Evaluation benchmarks described in Section 7 of the Bigtable
+ * paper on pages 8-10.
+ */
+public class EvaluationClient implements HConstants {
+ private final Logger LOG = Logger.getLogger(this.getClass().getName());
+
+ private static final int ROW_LENGTH = 1024;
+
+
+ private static final int ONE_HUNDRED_MB = 1024 * 1024 * 1 /*100 RESTORE*/;
+ private static final int ROWS_PER_100_MB = ONE_HUNDRED_MB / ROW_LENGTH;
+
+ private static final int ONE_GB = ONE_HUNDRED_MB * 10;
+ private static final int ROWS_PER_GB = ONE_GB / ROW_LENGTH;
+
+ private static final Text COLUMN_NAME = new Text(COLUMN_FAMILY + "data");
+
+ private static HTableDescriptor tableDescriptor;
+
+ static {
+ tableDescriptor = new HTableDescriptor("TestTable", 1);
+ tableDescriptor.addFamily(COLUMN_FAMILY);
+ }
+
+ private static enum Test {RANDOM_READ,
+ RANDOM_READ_MEM,
+ RANDOM_WRITE,
+ SEQUENTIAL_READ,
+ SEQUENTIAL_WRITE,
+ SCAN};
+
+ private Random rand;
+ private Configuration conf;
+ private HClient client;
+ private boolean miniCluster;
+ private int N; // Number of clients and HRegionServers
+ private int range; // Row range for this client
+ private int R; // Total number of rows
+
+ private EvaluationClient() {
+ this.rand = new Random(System.currentTimeMillis());
+ this.conf = new HBaseConfiguration();
+ this.miniCluster = false;
+ this.client = new HClient(conf);
+ this.N = 1; // Default is one client
+ this.range = 0; // Range for this client
+ this.R = ROWS_PER_GB; // Default for one client
+ }
+
+ private byte[] generateValue() {
+ StringBuilder val = new StringBuilder();
+ while(val.length() < ROW_LENGTH) {
+ val.append(Long.toString(rand.nextLong()));
+ }
+ return val.toString().getBytes();
+ }
+
+ private long randomRead(int startRow, int nRows) throws Exception {
+ LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+ client.openTable(tableDescriptor.getName());
+
+ long startTime = System.currentTimeMillis();
+ int lastRow = startRow + nRows;
+ for(int i = startRow; i < lastRow; i++) {
+ client.get(new Text(Integer.toString(rand.nextInt() % R)), COLUMN_NAME);
+ }
+ return System.currentTimeMillis() - startTime;
+ }
+
+ private long randomWrite(int startRow, int nRows) throws Exception {
+ LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+ client.openTable(tableDescriptor.getName());
+
+ long startTime = System.currentTimeMillis();
+ int lastRow = startRow + nRows;
+ for(int i = startRow; i < lastRow; i++) {
+ long lockid = client.startUpdate(new Text(Integer.toString(rand.nextInt() % R)));
+ client.put(lockid, COLUMN_NAME, generateValue());
+ client.commit(lockid);
+ }
+ return System.currentTimeMillis() - startTime;
+ }
+
+ private long scan(int startRow, int nRows) throws Exception {
+ LOG.info("startRow: " + startRow + ", nRows: " + nRows);
+ client.openTable(tableDescriptor.getName());
+
+ HScannerInterface scanner = client.obtainScanner(new Text[] { COLUMN_NAME },
+ new Text(Integer.toString(startRow)));
+
+ long startTime = System.currentTimeMillis();
+
+ try {
+ int lastRow = startRow + nRows;
+ HStoreKey key = new HStoreKey();
+ TreeMap