From dc6e91f6153767adb40749ff39f204f21961f36d Mon Sep 17 00:00:00 2001
From: Bryan Duxbury
Date: Wed, 12 Mar 2008 15:41:54 +0000
Subject: [PATCH] HBASE-430 Performance: Scanners and getRow return maps with
 duplicate data

-HRegionInterface's next method now returns RowResult objects
-All direct consumers of HRegionInterface make use of RowResults

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@636373 13f79535-47bb-0310-9956-ffa450edef68
---
 src/java/org/apache/hadoop/hbase/HMerge.java       |   5 +-
 .../hadoop/hbase/client/HBaseAdmin.java            |  30 ++--
 .../hbase/client/HConnectionManager.java           |  29 ++-
 .../apache/hadoop/hbase/client/HTable.java         |  56 +++---
 .../hadoop/hbase/io/HbaseObjectWritable.java       |   2 +
 .../org/apache/hadoop/hbase/io/RowResult.java      | 168 ++++++++++++++++++
 .../hadoop/hbase/ipc/HRegionInterface.java         |   3 +-
 .../hadoop/hbase/master/BaseScanner.java           |  30 ++--
 .../apache/hadoop/hbase/master/HMaster.java        |  22 +--
 .../hbase/master/ProcessServerShutdown.java        |  14 +-
 .../hadoop/hbase/master/TableOperation.java        |  16 +-
 .../hadoop/hbase/regionserver/HRegion.java         |   7 +-
 .../hbase/regionserver/HRegionServer.java          |  14 +-
 .../hadoop/hbase/regionserver/HStore.java          |   6 +-
 .../apache/hadoop/hbase/util/Writables.java        |  32 ++++
 .../hadoop/hbase/DisabledTestScanner2.java         |  18 +-
 .../hadoop/hbase/regionserver/TestGet.java         |   2 +-
 17 files changed, 316 insertions(+), 138 deletions(-)
 create mode 100644 src/java/org/apache/hadoop/hbase/io/RowResult.java

diff --git a/src/java/org/apache/hadoop/hbase/HMerge.java b/src/java/org/apache/hadoop/hbase/HMerge.java
index 684f043922e..ea493181819 100644
--- a/src/java/org/apache/hadoop/hbase/HMerge.java
+++ b/src/java/org/apache/hadoop/hbase/HMerge.java
@@ -335,8 +335,9 @@ class HMerge implements HConstants, Tool {
       root = new HRegion(rootTableDir, hlog, fs, conf,
         HRegionInfo.rootRegionInfo, null, null);
 
-      HScannerInterface rootScanner = root.getScanner(COL_REGIONINFO_ARRAY,
-        new Text(), System.currentTimeMillis(), null);
+      HScannerInterface rootScanner =
+        root.getScanner(COL_REGIONINFO_ARRAY, new Text(),
+        HConstants.LATEST_TIMESTAMP, null);
 
       try {
         HStoreKey key = new HStoreKey();
diff --git a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index dc27d12d8f1..084415726af 100644
--- a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -43,6 +42,8 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.Cell;
 
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
@@ -201,16 +202,15 @@ public class HBaseAdmin implements HConstants {
         scannerId =
           server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
           COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
         boolean found = false;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             if (info.getTableDesc().getName().equals(tableName)) {
               found = true;
@@ -285,7 +285,7 @@
       boolean isenabled = false;
 
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           if (valuesfound == 0) {
             throw new NoSuchElementException(
@@ -294,11 +294,10 @@
           break;
         }
         valuesfound += 1;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             isenabled = !info.isOffline();
             break;
@@ -386,7 +385,7 @@
       boolean disabled = false;
 
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           if (valuesfound == 0) {
             throw new NoSuchElementException("table " + tableName + " not found");
@@ -394,11 +393,10 @@
           break;
         }
         valuesfound += 1;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             disabled = info.isOffline();
             break;
diff --git a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index c22a1c6d3ea..da3a2fa78ee 100644
--- a/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/src/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * A non-instantiable class that manages connections to multiple tables in
@@ -265,27 +266,21 @@ public class HConnectionManager implements HConstants {
           // open a scanner over the meta region
           scannerId = server.openScanner(
             metaLocation.getRegionInfo().getRegionName(),
-            COLUMN_FAMILY_ARRAY, startRow, LATEST_TIMESTAMP,
-            null);
+            new Text[]{COL_REGIONINFO}, startRow, LATEST_TIMESTAMP, null);
 
           // iterate through the scanner, accumulating unique table names
           while (true) {
-            HbaseMapWritable values = server.next(scannerId);
+            RowResult values = server.next(scannerId);
             if (values == null || values.size() == 0) {
               break;
             }
-            for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-              HStoreKey key = (HStoreKey) e.getKey();
-              if (key.getColumn().equals(COL_REGIONINFO)) {
-                HRegionInfo info = new HRegionInfo();
-                info = (HRegionInfo) Writables.getWritable(
-                  ((ImmutableBytesWritable) e.getValue()).get(), info);
+
+            HRegionInfo info =
+              Writables.getHRegionInfo(values.get(COL_REGIONINFO));
 
-                // Only examine the rows where the startKey is zero length
-                if (info.getStartKey().getLength() == 0) {
-                  uniqueTables.add(info.getTableDesc());
-                }
-              }
+            // Only examine the rows where the startKey is zero length
+            if (info.getStartKey().getLength() == 0) {
+              uniqueTables.add(info.getTableDesc());
             }
           }
 
@@ -447,11 +442,9 @@ public class HConnectionManager implements HConstants {
         throw new IllegalStateException("region offline: " +
           regionInfo.getRegionName());
       }
-
-      Cell serverValue = results.get(COL_SERVER);
 
-      String serverAddress = Writables.bytesToString(
-        serverValue == null ? null : serverValue.getValue());
+      String serverAddress =
+        Writables.cellToString(results.get(COL_SERVER));
 
       if (serverAddress.equals("")) {
         throw new NoServerForRegionException(
diff --git a/src/java/org/apache/hadoop/hbase/client/HTable.java b/src/java/org/apache/hadoop/hbase/client/HTable.java
index d3ac9668f5b..41382618e37 100644
--- a/src/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/src/java/org/apache/hadoop/hbase/client/HTable.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Writables;
@@ -198,36 +199,28 @@ public class HTable implements HConstants {
           // open a scanner over the meta region
           scannerId = server.openScanner(
             metaLocation.getRegionInfo().getRegionName(),
-            COLUMN_FAMILY_ARRAY, tableName, LATEST_TIMESTAMP,
+            new Text[]{COL_REGIONINFO}, tableName, LATEST_TIMESTAMP,
             null);
 
-          // iterate through the scanner, accumulating unique table names
-          SCANNER_LOOP: while (true) {
-            HbaseMapWritable values = server.next(scannerId);
+          // iterate through the scanner, accumulating unique region names
+          while (true) {
+            RowResult values = server.next(scannerId);
             if (values == null || values.size() == 0) {
               break;
             }
-            for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-              HStoreKey key = (HStoreKey) e.getKey();
-              if (key.getColumn().equals(COL_REGIONINFO)) {
-                HRegionInfo info = new HRegionInfo();
-                info = (HRegionInfo) Writables.getWritable(
-                  ((ImmutableBytesWritable) e.getValue()).get(), info);
-
-                if (!info.getTableDesc().getName().equals(this.tableName)) {
-                  break SCANNER_LOOP;
-                }
-
-                if (info.isOffline()) {
-                  continue SCANNER_LOOP;
-                }
-
-                if (info.isSplit()) {
-                  continue SCANNER_LOOP;
-                }
-
-                keyList.add(info.getStartKey());
-              }
+
+            HRegionInfo info = new HRegionInfo();
+            info = (HRegionInfo) Writables.getWritable(
+              values.get(COL_REGIONINFO).getValue(), info);
+
+            if (!info.getTableDesc().getName().equals(this.tableName)) {
+              break;
+            }
+
+            if (info.isOffline() || info.isSplit()) {
+              continue;
+            } else {
+              keyList.add(info.getStartKey());
             }
           }
 
@@ -889,7 +882,7 @@ public class HTable implements HConstants {
       if (this.closed) {
        return false;
       }
-      HbaseMapWritable values = null;
+      RowResult values = null;
       // Clear the results so we don't inherit any values from any previous
       // calls to next.
       results.clear();
@@ -898,13 +891,12 @@
       } while (values != null && values.size() == 0 && nextScanner());
 
       if (values != null && values.size() != 0) {
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey k = (HStoreKey) e.getKey();
-          key.setRow(k.getRow());
-          key.setVersion(k.getTimestamp());
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          // HStoreKey k = (HStoreKey) e.getKey();
+          key.setRow(values.getRow());
+          key.setVersion(e.getValue().getTimestamp());
           key.setColumn(EMPTY_COLUMN);
-          results.put(k.getColumn(),
-            ((ImmutableBytesWritable) e.getValue()).get());
+          results.put(e.getKey(), e.getValue().getValue());
         }
       }
       return values == null ? false : values.size() != 0;
diff --git a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
index 8e1104bf5fc..758243791a3 100644
--- a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
+++ b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * This is a customized version of the polymorphic hadoop
@@ -120,6 +121,7 @@
     } catch (ClassNotFoundException e) {
       e.printStackTrace();
     }
+    addToMap(RowResult.class, code++);
   }
 
   private Class declaredClass;
diff --git a/src/java/org/apache/hadoop/hbase/io/RowResult.java b/src/java/org/apache/hadoop/hbase/io/RowResult.java
new file mode 100644
index 00000000000..8d6393f2df6
--- /dev/null
+++ b/src/java/org/apache/hadoop/hbase/io/RowResult.java
@@ -0,0 +1,168 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+public class RowResult implements Writable, Map<Text, Cell> {
+  protected Text row;
+  protected HbaseMapWritable cells;
+
+  /**
+   * Used by Writable
+   */
+  public RowResult () {
+    row = new Text();
+    cells = new HbaseMapWritable();
+  }
+
+  /**
+   * Create a RowResult from a row and Cell map
+   */
+  public RowResult (final Text row, final HbaseMapWritable hbw) {
+    this.row = row;
+    this.cells = hbw;
+  }
+
+  /**
+   * Get the row for this RowResult
+   */
+  public Text getRow() {
+    return row;
+  }
+
+  //
+  // Map interface
+  //
+
+  public Cell put(Text key, Cell value) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public void putAll(Map<? extends Text, ? extends Cell> map) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public Cell get(Object key) {
+    return (Cell)cells.get(key);
+  }
+
+  public Cell remove(Object key) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public boolean containsKey(Object key) {
+    return cells.containsKey(key);
+  }
+
+  public boolean containsValue(Object value) {
+    throw new UnsupportedOperationException("Don't support containsValue!");
+  }
+
+  public boolean isEmpty() {
+    return cells.isEmpty();
+  }
+
+  public int size() {
+    return cells.size();
+  }
+
+  public void clear() {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public Set<Text> keySet() {
+    Set<Text> result = new HashSet<Text>();
+    for (Writable w : cells.keySet()) {
+      result.add((Text)w);
+    }
+    return result;
+  }
+
+  public Set<Map.Entry<Text, Cell>> entrySet() {
+    Set<Map.Entry<Text, Cell>> result = new HashSet<Map.Entry<Text, Cell>>();
+    for (Map.Entry<Writable, Writable> e : cells.entrySet()) {
+      result.add(new Entry((Text)e.getKey(), (Cell)e.getValue()));
+    }
+    return result;
+  }
+
+  public Collection<Cell> values() {
+    ArrayList<Cell> result = new ArrayList<Cell>();
+    for (Writable w : cells.values()) {
+      result.add((Cell)w);
+    }
+    return result;
+  }
+
+  /**
+   * Get the Cell that corresponds to column
+   */
+  public Cell get(Text column) {
+    return (Cell)cells.get(column);
+  }
+
+  public class Entry implements Map.Entry<Text, Cell> {
+    private Text row;
+    private Cell cell;
+
+    Entry(Text row, Cell cell) {
+      this.row = row;
+      this.cell = cell;
+    }
+
+    public Cell setValue(Cell c) {
+      throw new UnsupportedOperationException("RowResult is read-only!");
+    }
+
+    public Text getKey() {
+      return row;
+    }
+
+    public Cell getValue() {
+      return cell;
+    }
+  }
+
+  //
+  // Writable
+  //
+
+  public void readFields(final DataInput in) throws IOException {
+    row.readFields(in);
+    cells.readFields(in);
+  }
+
+  public void write(final DataOutput out) throws IOException {
+    row.write(out);
+    cells.write(out);
+  }
+}
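[Reviewer note, illustrative and not part of the patch] RowResult models a single row as a read-only Map<Text, Cell> keyed by column name, plus the row key carried once via getRow(), so the row is no longer repeated inside every map key. A minimal consumption sketch against the class added above; the RowResultUsageSketch class and its dump() helper are hypothetical names:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.RowResult;

    public class RowResultUsageSketch {
      // Prints every cell of one row.  'result' would come from
      // HRegionInterface.next().  Nothing here mutates the map, because
      // put(), remove() and clear() all throw UnsupportedOperationException.
      static void dump(RowResult result) {
        System.out.println("row=" + result.getRow());
        for (java.util.Map.Entry<Text, Cell> e : result.entrySet()) {
          Cell cell = e.getValue();
          System.out.println("  " + e.getKey() + " @ " + cell.getTimestamp()
            + " = " + new String(cell.getValue()));
        }
      }
    }
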
diff --git a/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java b/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
index 555c38838fc..e6f1b4c1378 100644
--- a/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
+++ b/src/java/org/apache/hadoop/hbase/ipc/HRegionInterface.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.io.Text;
 
@@ -219,7 +220,7 @@ public interface HRegionInterface extends VersionedProtocol {
    * @return map of values
    * @throws IOException
    */
-  public HbaseMapWritable next(long scannerId) throws IOException;
+  public RowResult next(long scannerId) throws IOException;
 
   /**
    * Close a scanner
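[Reviewer note, illustrative and not part of the patch] The interface change collapses the old per-cell HbaseMapWritable, keyed by full HStoreKeys, into exactly one RowResult per next() call. A sketch of the client loop this enables, assuming a scanner id from a prior openScanner() call and the close(scannerId) method declared just below the hunk above; the ScanLoopSketch class is a hypothetical name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.ipc.HRegionInterface;

    public class ScanLoopSketch {
      // Drains an already-open scanner.  Each iteration now yields one row;
      // a null or empty RowResult means the scanner is exhausted.
      static void drain(HRegionInterface server, long scannerId)
      throws IOException {
        try {
          while (true) {
            RowResult values = server.next(scannerId);
            if (values == null || values.size() == 0) {
              break;
            }
            System.out.println("scanned row: " + values.getRow());
          }
        } finally {
          server.close(scannerId);
        }
      }
    }
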
diff --git a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
index aece0db4f87..b0e80beb1e3 100644
--- a/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
+++ b/src/java/org/apache/hadoop/hbase/master/BaseScanner.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 
@@ -149,8 +150,8 @@ abstract class BaseScanner extends Chore implements HConstants {
 
     // Array to hold list of split parents found.  Scan adds to list.  After
     // scan we go check if parents can be removed.
-    Map<HRegionInfo, SortedMap<Text, byte[]>> splitParents =
-      new HashMap<HRegionInfo, SortedMap<Text, byte[]>>();
+    Map<HRegionInfo, RowResult> splitParents =
+      new HashMap<HRegionInfo, RowResult>();
     try {
       regionServer = master.connection.getHRegionConnection(region.getServer());
       scannerId =
@@ -159,22 +160,18 @@ abstract class BaseScanner extends Chore implements HConstants {
 
       int numberOfRegionsFound = 0;
       while (true) {
-        HbaseMapWritable values = regionServer.next(scannerId);
+        RowResult values = regionServer.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
-        // TODO: Why does this have to be a sorted map?
-        SortedMap<Text, byte[]> results =
-          RowMap.fromHbaseMapWritable(values).getMap();
-
-        HRegionInfo info = master.getHRegionInfo(results);
+
+        HRegionInfo info = master.getHRegionInfo(values);
         if (info == null) {
           continue;
         }
 
-        String serverName = Writables.bytesToString(results.get(COL_SERVER));
-        long startCode = Writables.bytesToLong(results.get(COL_STARTCODE));
+        String serverName = Writables.cellToString(values.get(COL_SERVER));
+        long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
         if (LOG.isDebugEnabled()) {
           LOG.debug(Thread.currentThread().getName() + " regioninfo: {" +
             info.toString() + "}, server: " + serverName + ", startCode: " +
@@ -184,7 +181,7 @@
         // Note Region has been assigned.
         checkAssigned(info, serverName, startCode);
         if (isSplitParent(info)) {
-          splitParents.put(info, results);
+          splitParents.put(info, values);
         }
         numberOfRegionsFound += 1;
       }
@@ -216,8 +213,7 @@
     // Scan is finished.  Take a look at split parents to see if any we can
     // clean up.
     if (splitParents.size() > 0) {
-      for (Map.Entry<HRegionInfo, SortedMap<Text, byte[]>> e:
-          splitParents.entrySet()) {
+      for (Map.Entry<HRegionInfo, RowResult> e : splitParents.entrySet()) {
         HRegionInfo hri = e.getKey();
         cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
       }
@@ -252,8 +248,8 @@
    * @throws IOException
    */
   private boolean cleanupSplits(final Text metaRegionName,
-    final HRegionInterface srvr, final HRegionInfo parent,
-    SortedMap<Text, byte[]> rowContent)
+      final HRegionInterface srvr, final HRegionInfo parent,
+      RowResult rowContent)
   throws IOException {
     boolean result = false;
@@ -295,11 +291,11 @@
    */
   protected boolean hasReferences(final Text metaRegionName,
     final HRegionInterface srvr, final Text parent,
-    SortedMap<Text, byte[]> rowContent, final Text splitColumn)
+    RowResult rowContent, final Text splitColumn)
   throws IOException {
     boolean result = false;
     HRegionInfo split =
-      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn));
+      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn).getValue());
     if (split == null) {
       return result;
     }
diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java
index 381d6660d37..27d2d63cb36 100644
--- a/src/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -46,6 +46,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HbaseRPC;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;
@@ -614,18 +616,16 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     long scannerid = srvr.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
       tableName, System.currentTimeMillis(), null);
     try {
-      HbaseMapWritable data = srvr.next(scannerid);
+      RowResult data = srvr.next(scannerid);
 
       // Test data and that the row for the data is for our table. If table
       // does not exist, scanner will return row after where our table would
       // be inserted if it exists so look for exact match on table name.
       if (data != null && data.size() > 0) {
-        for (Writable k: data.keySet()) {
-          if (HRegionInfo.getTableNameFromRegionName(
-              ((HStoreKey) k).getRow()).equals(tableName)) {
-            // Then a region for this table already exists. Ergo table exists.
-            throw new TableExistsException(tableName.toString());
-          }
+        if (HRegionInfo.getTableNameFromRegionName(
+            data.getRow()).equals(tableName)) {
+          // Then a region for this table already exists. Ergo table exists.
+          throw new TableExistsException(tableName.toString());
         }
       }
     } finally {
@@ -697,15 +697,15 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @return Null or found HRegionInfo.
    * @throws IOException
    */
-  HRegionInfo getHRegionInfo(final Map<Text, byte[]> map)
+  HRegionInfo getHRegionInfo(final Map<Text, Cell> map)
   throws IOException {
-    byte [] bytes = map.get(COL_REGIONINFO);
-    if (bytes == null) {
+    Cell regioninfo = map.get(COL_REGIONINFO);
+    if (regioninfo == null) {
       LOG.warn(COL_REGIONINFO.toString() + " is empty; has keys: " +
         map.keySet().toString());
       return null;
     }
-    return (HRegionInfo)Writables.getWritable(bytes, new HRegionInfo());
+    return (HRegionInfo)Writables.getWritable(regioninfo.getValue(), new HRegionInfo());
   }
diff --git a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
index 6ce5a019c14..9c493b9e943 100644
--- a/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
+++ b/src/java/org/apache/hadoop/hbase/master/ProcessServerShutdown.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Instantiated when a server's lease has expired, meaning it has crashed.
@@ -97,7 +98,7 @@
 
       try {
         while (true) {
-          HbaseMapWritable values = null;
+          RowResult values = null;
           try {
             values = server.next(scannerId);
           } catch (IOException e) {
@@ -108,10 +109,9 @@
           if (values == null || values.size() == 0) {
             break;
           }
-          // TODO: Why does this have to be a sorted map?
-          RowMap rm = RowMap.fromHbaseMapWritable(values);
-          Text row = rm.getRow();
-          SortedMap<Text, byte[]> map = rm.getMap();
+
+          Text row = values.getRow();
+
           if (LOG.isDebugEnabled() && row != null) {
             LOG.debug("shutdown scanner looking at " + row.toString());
           }
@@ -121,7 +121,7 @@
           // missed edits in hlog because hdfs does not do write-append).
           String serverName;
           try {
-            serverName = Writables.bytesToString(map.get(COL_SERVER));
+            serverName = Writables.cellToString(values.get(COL_SERVER));
           } catch (UnsupportedEncodingException e) {
             LOG.error("Server name", e);
             break;
@@ -133,7 +133,7 @@
           }
 
           // Bingo! Found it.
-          HRegionInfo info = master.getHRegionInfo(map);
+          HRegionInfo info = master.getHRegionInfo(values);
           if (info == null) {
             continue;
           }
diff --git a/src/java/org/apache/hadoop/hbase/master/TableOperation.java b/src/java/org/apache/hadoop/hbase/master/TableOperation.java
index b007e82b7e2..118124e9c7d 100644
--- a/src/java/org/apache/hadoop/hbase/master/TableOperation.java
+++ b/src/java/org/apache/hadoop/hbase/master/TableOperation.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo
@@ -95,19 +97,19 @@
       try {
         while (true) {
-          HbaseMapWritable values = server.next(scannerId);
+          RowResult values = server.next(scannerId);
           if(values == null || values.size() == 0) {
             break;
           }
-          RowMap rm = RowMap.fromHbaseMapWritable(values);
-          SortedMap<Text, byte[]> map = rm.getMap();
-          HRegionInfo info = this.master.getHRegionInfo(map);
+          HRegionInfo info = this.master.getHRegionInfo(values);
           if (info == null) {
             throw new IOException(COL_REGIONINFO + " not found on " +
-              rm.getRow());
+              values.getRow());
           }
-          String serverName = Writables.bytesToString(map.get(COL_SERVER));
-          long startCode = Writables.bytesToLong(map.get(COL_STARTCODE));
+          String serverName =
+            Writables.cellToString(values.get(COL_SERVER));
+          long startCode =
+            Writables.cellToLong(values.get(COL_STARTCODE));
           if (info.getTableDesc().getName().compareTo(tableName) > 0) {
             break; // Beyond any more entries for this table
           }
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c0b595be77f..453e6718fe2 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
@@ -1204,7 +1205,8 @@
    * @throws IOException
    */
   public HScannerInterface getScanner(Text[] cols, Text firstRow,
-    long timestamp, RowFilterInterface filter) throws IOException {
+    long timestamp, RowFilterInterface filter)
+  throws IOException {
     lock.readLock().lock();
     try {
       if (this.closed.get()) {
@@ -1639,7 +1641,7 @@
     /** Create an HScanner with a handle on many HStores. */
     @SuppressWarnings("unchecked")
     HScanner(Text[] cols, Text firstRow, long timestamp, HStore[] stores,
-      RowFilterInterface filter)
+        RowFilterInterface filter)
     throws IOException {
       this.scanners = new HInternalScannerInterface[stores.length];
       try {
@@ -1756,7 +1758,6 @@
       return moreToFollow;
     }
 
-
     /** Shut down a single scanner */
     void closeScanner(int i) {
       try {
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f6f4edf3e24..a033a730c24 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
@@ -1006,8 +1007,7 @@
   }
 
   /** {@inheritDoc} */
-  public HbaseMapWritable next(final long scannerId) throws IOException {
-
+  public RowResult next(final long scannerId) throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
@@ -1024,8 +1024,7 @@
       TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
       while (s.next(key, results)) {
         for(Map.Entry<Text, byte[]> e: results.entrySet()) {
-          values.put(new HStoreKey(key.getRow(), e.getKey(), key.getTimestamp()),
-            new ImmutableBytesWritable(e.getValue()));
+          values.put(e.getKey(), new Cell(e.getValue(), key.getTimestamp()));
         }
 
         if(values.size() > 0) {
@@ -1036,8 +1035,7 @@
         // No data for this row, go get another.
         results.clear();
       }
-      return values;
-
+      return new RowResult(key.getRow(), values);
     } catch (IOException e) {
       checkFileSystem();
       throw e;
@@ -1064,8 +1062,8 @@
 
   /** {@inheritDoc} */
   public long openScanner(Text regionName, Text[] cols, Text firstRow,
-    final long timestamp, final RowFilterInterface filter)
-    throws IOException {
+      final long timestamp, final RowFilterInterface filter)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
index f893d66b8a7..592751fe205 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.io.Cell;
-
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * HStore maintains a bunch of data files.  It is responsible for maintaining
@@ -2252,7 +2252,8 @@
     /** Create an Scanner with a handle on the memcache and HStore files. */
     @SuppressWarnings("unchecked")
     HStoreScanner(Text[] targetCols, Text firstRow, long timestamp,
-      RowFilterInterface filter) throws IOException {
+        RowFilterInterface filter)
+    throws IOException {
       this.dataFilter = filter;
       if (null != dataFilter) {
@@ -2454,7 +2455,6 @@
       return moreToFollow;
     }
 
-
     /** Shut down a single scanner */
     void closeScanner(int i) {
diff --git a/src/java/org/apache/hadoop/hbase/util/Writables.java b/src/java/org/apache/hadoop/hbase/util/Writables.java
index 7f7e5c5c493..09b6fe0d333 100644
--- a/src/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/src/java/org/apache/hadoop/hbase/util/Writables.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * Utility class with methods for manipulating Writable objects
@@ -112,6 +113,19 @@
       (HRegionInfo)null: getHRegionInfo(bytes);
   }
 
+  /**
+   * @param cell Cell object containing the serialized HRegionInfo
+   * @return A HRegionInfo instance built out of passed cell.
+   * @throws IOException
+   */
+  public static HRegionInfo getHRegionInfo(final Cell cell) throws IOException {
+    if (cell == null) {
+      return null;
+    } else {
+      return getHRegionInfo(cell.getValue());
+    }
+  }
+
   /**
    * Copy one Writable to another.  Copies bytes using data streams.
    * @param src Source Writable
@@ -184,4 +198,22 @@
     }
     return new String(bytes, HConstants.UTF8_ENCODING);
   }
+
+  public static String cellToString(Cell c)
+  throws UnsupportedEncodingException {
+    if (c == null) {
+      return "";
+    } else {
+      return bytesToString(c.getValue());
+    }
+  }
+
+  public static long cellToLong(Cell c)
+  throws IOException {
+    if (c == null) {
+      return 0;
+    } else {
+      return bytesToLong(c.getValue());
+    }
+  }
 }
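[Reviewer note, illustrative and not part of the patch] The new null-tolerant Cell helpers centralize the "column may be absent" handling that callers previously open-coded around bytesToString/bytesToLong. A sketch of how the master-side scanners above use them to decode one META row; the CellHelperSketch class is a hypothetical name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.io.RowResult;
    import org.apache.hadoop.hbase.util.Writables;

    public class CellHelperSketch {
      // Decodes the standard META columns from one scanned row: a missing
      // COL_REGIONINFO yields null, a missing COL_SERVER yields "", and a
      // missing COL_STARTCODE yields 0, so callers need no null checks.
      static void decode(RowResult values) throws IOException {
        HRegionInfo info =
          Writables.getHRegionInfo(values.get(HConstants.COL_REGIONINFO));
        String server = Writables.cellToString(values.get(HConstants.COL_SERVER));
        long startCode = Writables.cellToLong(values.get(HConstants.COL_STARTCODE));
        System.out.println(info + " on " + server + "/" + startCode);
      }
    }
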
diff --git a/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java b/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
index c5faee7bed1..d4ec30b5ddc 100644
--- a/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
+++ b/src/test/org/apache/hadoop/hbase/DisabledTestScanner2.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Additional scanner tests.
@@ -374,26 +375,19 @@
       HConstants.COLUMN_FAMILY_ARRAY, new Text(),
       System.currentTimeMillis(), null);
     while (true) {
-      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-      HbaseMapWritable values = regionServer.next(scannerId);
+      RowResult values = regionServer.next(scannerId);
       if (values == null || values.size() == 0) {
         break;
       }
 
-      for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-        HStoreKey k = (HStoreKey) e.getKey();
-        results.put(k.getColumn(),
-          ((ImmutableBytesWritable) e.getValue()).get());
-      }
-
       HRegionInfo info = (HRegionInfo) Writables.getWritable(
-        results.get(HConstants.COL_REGIONINFO), new HRegionInfo());
+        values.get(HConstants.COL_REGIONINFO).getValue(), new HRegionInfo());
 
-      byte[] bytes = results.get(HConstants.COL_SERVER);
-      String serverName = Writables.bytesToString(bytes);
+      String serverName =
+        Writables.cellToString(values.get(HConstants.COL_SERVER));
 
       long startCode =
-        Writables.bytesToLong(results.get(HConstants.COL_STARTCODE));
+        Writables.cellToLong(values.get(HConstants.COL_STARTCODE));
 
       LOG.info(Thread.currentThread().getName() + " scanner: " +
         Long.valueOf(scannerId) + ": regioninfo: {" + info.toString()
diff --git a/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java b/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java
index 6a38306a8ee..bc6b5f63ec0 100644
--- a/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java
+++ b/src/test/org/apache/hadoop/hbase/regionserver/TestGet.java
@@ -67,7 +67,7 @@ public class TestGet extends HBaseTestCase {
     for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
       Text column = i.next();
       if (column.equals(HConstants.COL_SERVER)) {
-        String server = Writables.bytesToString(values.get(column).getValue());
+        String server = Writables.cellToString(values.get(column));
         assertEquals(expectedServer, server);
         LOG.info(server);
       }
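[Reviewer note, illustrative and not part of the patch] On the wire a RowResult is just the row key followed by its map of column-to-Cell entries (see RowResult.write()/readFields() above), which is how HRegionServer.next() packs each row before returning it. A sketch of that packing with made-up row, column, and value data; the PackSketch class is a hypothetical name:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.hbase.io.Cell;
    import org.apache.hadoop.hbase.io.HbaseMapWritable;
    import org.apache.hadoop.hbase.io.RowResult;

    public class PackSketch {
      // Mirrors what HRegionServer.next() does: collect one row's cells
      // into an HbaseMapWritable, stamping each value with its timestamp,
      // then wrap the map together with the row key.
      static RowResult pack() {
        HbaseMapWritable cells = new HbaseMapWritable();
        cells.put(new Text("colfamily1:a"), new Cell("v1".getBytes(), 42L));
        cells.put(new Text("colfamily1:b"), new Cell("v2".getBytes(), 42L));
        return new RowResult(new Text("row1"), cells);
      }
    }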