From e6e11eae014c6b214de56d52509ad639cc9b7d8e Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Fri, 11 Jul 2008 21:43:23 +0000 Subject: [PATCH] HBASE-738 overview.html in need of updating git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@676090 13f79535-47bb-0310-9956-ffa450edef68 --- CHANGES.txt | 1 + src/java/overview.html | 129 +++++++++++++++++++++-------------------- 2 files changed, 68 insertions(+), 62 deletions(-) diff --git a/CHANGES.txt b/CHANGES.txt index 8afdd14c184..17891b90313 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -283,6 +283,7 @@ Trunk (unreleased changes) (Jean-Daniel Cryans via Stack) HBASE-730 On startup, rinse STARTCODE and SERVER from .META. (Jean-Daniel Cryans via Stack) + HBASE-738 overview.html in need of updating (Izaak Rubin via Stack) NEW FEATURES HBASE-47 Option to set TTL for columns in hbase diff --git a/src/java/overview.html b/src/java/overview.html index 6198673e61f..77b1ccf8ae8 100644 --- a/src/java/overview.html +++ b/src/java/overview.html @@ -28,7 +28,8 @@ An example of such an HDFS client configuration is dfs.replication. If for example, -you want to run with a replication factor of 5, hbase will make files will create files with -the default of 3 unless you do the above to make the configuration available to hbase. +you want to run with a replication factor of 5, hbase will create files with the default of 3 unless +you do the above to make the configuration available to hbase.

Running and Confirming Your Installation

@@ -125,7 +138,7 @@ the local filesystem.

If you are running a distributed cluster you will need to start the Hadoop DFS daemons before starting HBase and stop the daemons after HBase has shut down. Start and stop the Hadoop DFS daemons by running ${HADOOP_HOME}/bin/start-dfs.sh. -Ensure it started properly by testing the put and get of files into the Hadoop filesystem. +You can ensure it started properly by testing the put and get of files into the Hadoop filesystem. HBase does not normally use the mapreduce daemons. These do not need to be started.

Start HBase with the following command: @@ -169,14 +182,12 @@ the HBase version. It does not change your install unless you explicitly ask it

+import java.io.IOException;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HStoreKey;
-import org.apache.hadoop.hbase.HScannerInterface;
+import org.apache.hadoop.hbase.client.Scanner;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
-import org.apache.hadoop.io.Text;
-import java.io.IOException;
+import org.apache.hadoop.hbase.io.RowResult;
 
 public class MyClient {
 
@@ -187,12 +198,12 @@ public class MyClient {
 
     // This instantiates an HTable object that connects you to the "myTable"
     // table. 
-    HTable table = new HTable(config, new Text("myTable"));
+    HTable table = new HTable(config, "myTable");
 
     // To do any sort of update on a row, you use an instance of the BatchUpdate
     // class. A BatchUpdate takes a row and optionally a timestamp which your
     // updates will affect. 
-    BatchUpdate batchUpdate = new BatchUpdate(new Text("myRow"));
+    BatchUpdate batchUpdate = new BatchUpdate("myRow");
 
-    // The BatchUpdate#put method takes a Text that describes what cell you want
-    // to put a value into, and a byte array that is the value you want to 
+    // The BatchUpdate#put method takes a String that describes what cell you want
+    // to put a value into, and a byte array that is the value you want to 
@@ -200,11 +211,11 @@ public class MyClient {
     // from the string for HBase to understand how to store it. (The same goes
     // for primitives like ints and longs and user-defined classes - you must 
     // find a way to reduce it to bytes.)
-    batchUpdate.put(new Text("myColumnFamily:columnQualifier1"), 
+    batchUpdate.put("myColumnFamily:columnQualifier1", 
       "columnQualifier1 value!".getBytes());
 
     // Deletes are batch operations in HBase as well. 
-    batchUpdate.delete(new Text("myColumnFamily:cellIWantDeleted"));
+    batchUpdate.delete("myColumnFamily:cellIWantDeleted");
 
     // Once you've done all the puts you want, you need to commit the results.
     // The HTable#commit method takes the BatchUpdate instance you've been 
@@ -216,44 +227,38 @@ public class MyClient {
     // the timestamp the value was stored with. If you happen to know that the 
     // value contained is a string and want an actual string, then you must 
     // convert it yourself.
-    Cell cell = table.get(new Text("myRow"), 
-      new Text("myColumnFamily:columnQualifier1"));
-    String valueStr = new String(valueBytes.getValue());
+    Cell cell = table.get("myRow", "myColumnFamily:columnQualifier1");
+    String valueStr = new String(cell.getValue());
     
     // Sometimes, you won't know the row you're looking for. In this case, you
     // use a Scanner. This will give you cursor-like interface to the contents
     // of the table.
-    HStoreKey row = new HStoreKey();
-    SortedMap columns = new TreeMap();
-    HScannerInterface scanner = 
+    Scanner scanner = 
       // we want to get back only "myColumnFamily:columnQualifier1" when we iterate
-      table.obtainScanner(new Text[]{new Text("myColumnFamily:columnQualifier1")}, 
-      // we want to start scanning from an empty Text, meaning the beginning of
-      // the table
-      new Text(""));
+      table.getScanner(new String[]{"myColumnFamily:columnQualifier1"});
     
     
     // Scanners in HBase 0.2 return RowResult instances. A RowResult is like the
     // row key and the columns all wrapped up in a single interface. 
     // RowResult#getRow gives you the row key. RowResult also implements 
-    // Map, so you can get to your column results easily. 
+    // Map, so you can get to your column results easily. 
     
     // Now, for the actual iteration. One way is to use a while loop like so:
     RowResult rowResult = scanner.next();
     
     while(rowResult != null) {
       // print out the row we found and the columns we were looking for
-      System.out.println("Found row: " + rowResult.getRow() + " with value: " +
-       new String(rowResult.get("myColumnFamily:columnQualifier1")));
+      System.out.println("Found row: " + new String(rowResult.getRow()) + " with value: " +
+       new String(rowResult.get("myColumnFamily:columnQualifier1".getBytes()).getValue()));
       
       rowResult = scanner.next();
     }
     
     // The other approach is to use a foreach loop. Scanners are iterable!
-    for (RowResult rowResult : scanner) {
+    for (RowResult result : scanner) {
       // print out the row we found and the columns we were looking for
-      System.out.println("Found row: " + rowResult.getRow() + " with value: " +
-       new String(rowResult.get("myColumnFamily:columnQualifier1")));
+      System.out.println("Found row: " + new String(result.getRow()) + " with value: " +
+       new String(result.get("myColumnFamily:columnQualifier1".getBytes()).getValue()));
     }
     
     // Make sure you close your scanners when you are done!