HBASE-430 Performance: Scanners and getRow return maps with duplicate data
- HRegionInterface's next method now returns RowResult objects
- All direct consumers of HRegionInterface make use of RowResults

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@636373 13f79535-47bb-0310-9956-ffa450edef68
parent 935d300957
commit dc6e91f615
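
For illustration, a sketch of what the change means at a call site (the proxy and scanner handle are stand-ins, not part of this diff). The old HbaseMapWritable reply was keyed by HStoreKey, so every column entry repeated the row and timestamp; a RowResult carries the row Text once and maps each column to a Cell holding the value and its timestamp:

    // hypothetical consumer; 'server' is an HRegionInterface proxy and
    // 'scannerId' an open scanner handle
    RowResult row = server.next(scannerId);
    Text rowKey = row.getRow();                    // row key, stored once
    for (Map.Entry<Text, Cell> e : row.entrySet()) {
      Text column = e.getKey();                    // column name
      byte[] value = e.getValue().getValue();      // cell value
      long ts = e.getValue().getTimestamp();       // per-cell timestamp
    }
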
@@ -335,8 +335,9 @@ class HMerge implements HConstants, Tool {
       root = new HRegion(rootTableDir, hlog, fs, conf,
           HRegionInfo.rootRegionInfo, null, null);
 
-      HScannerInterface rootScanner = root.getScanner(COL_REGIONINFO_ARRAY,
-          new Text(), System.currentTimeMillis(), null);
+      HScannerInterface rootScanner =
+        root.getScanner(COL_REGIONINFO_ARRAY, new Text(),
+          HConstants.LATEST_TIMESTAMP, null);
 
       try {
         HStoreKey key = new HStoreKey();

@@ -34,7 +34,6 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;

@@ -43,6 +42,8 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.io.RowResult;
+import org.apache.hadoop.hbase.io.Cell;
 
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 

@@ -201,16 +202,15 @@ public class HBaseAdmin implements HConstants {
         scannerId =
           server.openScanner(firstMetaServer.getRegionInfo().getRegionName(),
             COL_REGIONINFO_ARRAY, tableName, System.currentTimeMillis(), null);
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
         boolean found = false;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             if (info.getTableDesc().getName().equals(tableName)) {
               found = true;

@@ -285,7 +285,7 @@ public class HBaseAdmin implements HConstants {
       boolean isenabled = false;
 
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           if (valuesfound == 0) {
             throw new NoSuchElementException(

@@ -294,11 +294,10 @@ public class HBaseAdmin implements HConstants {
           break;
         }
         valuesfound += 1;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             isenabled = !info.isOffline();
             break;

@@ -386,7 +385,7 @@ public class HBaseAdmin implements HConstants {
 
       boolean disabled = false;
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           if (valuesfound == 0) {
             throw new NoSuchElementException("table " + tableName + " not found");

@@ -394,11 +393,10 @@ public class HBaseAdmin implements HConstants {
           break;
         }
         valuesfound += 1;
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          if (e.getKey().equals(COL_REGIONINFO)) {
             info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
+              e.getValue().getValue(), info);
 
             disabled = info.isOffline();
             break;

@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * A non-instantiable class that manages connections to multiple tables in

@@ -265,27 +266,21 @@ public class HConnectionManager implements HConstants {
       // open a scanner over the meta region
       scannerId = server.openScanner(
         metaLocation.getRegionInfo().getRegionName(),
-        COLUMN_FAMILY_ARRAY, startRow, LATEST_TIMESTAMP,
-        null);
+        new Text[]{COL_REGIONINFO}, startRow, LATEST_TIMESTAMP, null);
 
       // iterate through the scanner, accumulating unique table names
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
-            HRegionInfo info = new HRegionInfo();
-            info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
-
-            // Only examine the rows where the startKey is zero length
-            if (info.getStartKey().getLength() == 0) {
-              uniqueTables.add(info.getTableDesc());
-            }
-          }
-        }
+
+        HRegionInfo info =
+          Writables.getHRegionInfo(values.get(COL_REGIONINFO));
+
+        // Only examine the rows where the startKey is zero length
+        if (info.getStartKey().getLength() == 0) {
+          uniqueTables.add(info.getTableDesc());
+        }
       }
 

@@ -447,11 +442,9 @@ public class HConnectionManager implements HConstants {
         throw new IllegalStateException("region offline: " +
           regionInfo.getRegionName());
       }
 
-      Cell serverValue = results.get(COL_SERVER);
-
-      String serverAddress = Writables.bytesToString(
-        serverValue == null ? null : serverValue.getValue());
+      String serverAddress =
+        Writables.cellToString(results.get(COL_SERVER));
 
       if (serverAddress.equals("")) {
         throw new NoServerForRegionException(

@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.filter.StopRowFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchRowFilter;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Writables;

@@ -198,36 +199,28 @@ public class HTable implements HConstants {
       // open a scanner over the meta region
       scannerId = server.openScanner(
         metaLocation.getRegionInfo().getRegionName(),
-        COLUMN_FAMILY_ARRAY, tableName, LATEST_TIMESTAMP,
+        new Text[]{COL_REGIONINFO}, tableName, LATEST_TIMESTAMP,
         null);
 
-      // iterate through the scanner, accumulating unique table names
-      SCANNER_LOOP: while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+      // iterate through the scanner, accumulating unique region names
+      while (true) {
+        RowResult values = server.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey key = (HStoreKey) e.getKey();
-          if (key.getColumn().equals(COL_REGIONINFO)) {
-            HRegionInfo info = new HRegionInfo();
-            info = (HRegionInfo) Writables.getWritable(
-              ((ImmutableBytesWritable) e.getValue()).get(), info);
-
-            if (!info.getTableDesc().getName().equals(this.tableName)) {
-              break SCANNER_LOOP;
-            }
-
-            if (info.isOffline()) {
-              continue SCANNER_LOOP;
-            }
-
-            if (info.isSplit()) {
-              continue SCANNER_LOOP;
-            }
-
-            keyList.add(info.getStartKey());
-          }
-        }
+
+        HRegionInfo info = new HRegionInfo();
+        info = (HRegionInfo) Writables.getWritable(
+          values.get(COL_REGIONINFO).getValue(), info);
+
+        if (!info.getTableDesc().getName().equals(this.tableName)) {
+          break;
+        }
+
+        if (info.isOffline() || info.isSplit()) {
+          continue;
+        } else {
+          keyList.add(info.getStartKey());
+        }
       }
 

@@ -889,7 +882,7 @@ public class HTable implements HConstants {
       if (this.closed) {
         return false;
       }
-      HbaseMapWritable values = null;
+      RowResult values = null;
       // Clear the results so we don't inherit any values from any previous
       // calls to next.
       results.clear();

@@ -898,13 +891,12 @@ public class HTable implements HConstants {
       } while (values != null && values.size() == 0 && nextScanner());
 
       if (values != null && values.size() != 0) {
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey k = (HStoreKey) e.getKey();
-          key.setRow(k.getRow());
-          key.setVersion(k.getTimestamp());
+        for (Map.Entry<Text, Cell> e: values.entrySet()) {
+          // HStoreKey k = (HStoreKey) e.getKey();
+          key.setRow(values.getRow());
+          key.setVersion(e.getValue().getTimestamp());
           key.setColumn(EMPTY_COLUMN);
-          results.put(k.getColumn(),
-            ((ImmutableBytesWritable) e.getValue()).get());
+          results.put(e.getKey(), e.getValue().getValue());
         }
       }
       return values == null ? false : values.size() != 0;

@@ -44,6 +44,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * This is a customized version of the polymorphic hadoop

@@ -120,6 +121,7 @@ public class HbaseObjectWritable implements Writable, Configurable {
     } catch (ClassNotFoundException e) {
       e.printStackTrace();
     }
+    addToMap(RowResult.class, code++);
   }
 
   private Class<?> declaredClass;

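RowResult gets an entry here because HbaseObjectWritable serializes RPC values as a compact class code rather than a full class name, so any new Writable crossing the wire must be registered on both ends. A conceptual sketch of the mechanism (field and method shapes are illustrative, not the class's actual private API):

    // two lookup tables kept in sync; addToMap registers a class under
    // the next free code, as the hunk above does for RowResult
    static final Map<Class<?>, Integer> CLASS_TO_CODE = new HashMap<Class<?>, Integer>();
    static final Map<Integer, Class<?>> CODE_TO_CLASS = new HashMap<Integer, Class<?>>();
    static void addToMap(Class<?> clazz, int code) {
      CLASS_TO_CODE.put(clazz, code);
      CODE_TO_CLASS.put(code, clazz);
    }
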
@@ -0,0 +1,168 @@
+/**
+ * Copyright 2008 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Collection;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+public class RowResult implements Writable, Map<Text, Cell> {
+  protected Text row;
+  protected HbaseMapWritable cells;
+
+  /**
+   * Used by Writable
+   */
+  public RowResult () {
+    row = new Text();
+    cells = new HbaseMapWritable();
+  }
+
+  /**
+   * Create a RowResult from a row and Cell map
+   */
+  public RowResult (final Text row, final HbaseMapWritable hbw) {
+    this.row = row;
+    this.cells = hbw;
+  }
+
+  /**
+   * Get the row for this RowResult
+   */
+  public Text getRow() {
+    return row;
+  }
+
+  //
+  // Map interface
+  //
+
+  public Cell put(Text key, Cell value) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public void putAll(Map map) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public Cell get(Object key) {
+    return (Cell)cells.get(key);
+  }
+
+  public Cell remove(Object key) {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public boolean containsKey(Object key) {
+    return cells.containsKey(key);
+  }
+
+  public boolean containsValue(Object value) {
+    throw new UnsupportedOperationException("Don't support containsValue!");
+  }
+
+  public boolean isEmpty() {
+    return cells.isEmpty();
+  }
+
+  public int size() {
+    return cells.size();
+  }
+
+  public void clear() {
+    throw new UnsupportedOperationException("RowResult is read-only!");
+  }
+
+  public Set<Text> keySet() {
+    Set<Text> result = new HashSet<Text>();
+    for (Writable w : cells.keySet()) {
+      result.add((Text)w);
+    }
+    return result;
+  }
+
+  public Set<Map.Entry<Text, Cell>> entrySet() {
+    Set<Map.Entry<Text, Cell>> result = new HashSet<Map.Entry<Text, Cell>>();
+    for (Map.Entry<Writable, Writable> e : cells.entrySet()) {
+      result.add(new Entry((Text)e.getKey(), (Cell)e.getValue()));
+    }
+    return result;
+  }
+
+  public Collection<Cell> values() {
+    ArrayList<Cell> result = new ArrayList<Cell>();
+    for (Writable w : cells.values()) {
+      result.add((Cell)w);
+    }
+    return result;
+  }
+
+  /**
+   * Get the Cell that corresponds to column
+   */
+  public Cell get(Text column) {
+    return (Cell)cells.get(column);
+  }
+
+  public class Entry implements Map.Entry<Text, Cell> {
+    private Text row;
+    private Cell cell;
+
+    Entry(Text row, Cell cell) {
+      this.row = row;
+      this.cell = cell;
+    }
+
+    public Cell setValue(Cell c) {
+      throw new UnsupportedOperationException("RowResult is read-only!");
+    }
+
+    public Text getKey() {
+      return row;
+    }
+
+    public Cell getValue() {
+      return cell;
+    }
+  }
+
+  //
+  // Writable
+  //
+
+  public void readFields(final DataInput in) throws IOException {
+    row.readFields(in);
+    cells.readFields(in);
+  }
+
+  public void write(final DataOutput out) throws IOException {
+    row.write(out);
+    cells.write(out);
+  }
+}

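RowResult is deliberately a read-only Map view: lookups delegate to the wrapped HbaseMapWritable while every mutator throws. A minimal usage sketch (the column name is illustrative):

    RowResult result = server.next(scannerId);
    Cell cell = result.get(new Text("info:regioninfo"));  // Map-style lookup
    if (cell != null) {
      byte[] bytes = cell.getValue();
      long timestamp = cell.getTimestamp();
    }
    // Any attempt to modify the map fails fast: put(), putAll(), remove()
    // and clear() all throw UnsupportedOperationException.
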
@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.io.Text;

@@ -219,7 +220,7 @@ public interface HRegionInterface extends VersionedProtocol {
    * @return map of values
    * @throws IOException
    */
-  public HbaseMapWritable next(long scannerId) throws IOException;
+  public RowResult next(long scannerId) throws IOException;
 
   /**
    * Close a scanner

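The full consumption pattern against the revised interface, as the client classes above use it (the region name and start row are placeholders; close() is the scanner-shutdown method declared just below this hunk):

    long scannerId = server.openScanner(regionName,
        new Text[] { HConstants.COL_REGIONINFO }, startRow,
        HConstants.LATEST_TIMESTAMP, null);
    try {
      while (true) {
        RowResult values = server.next(scannerId);
        if (values == null || values.size() == 0) {
          break;                  // scanner exhausted
        }
        // examine values.getRow() and the per-column Cells here
      }
    } finally {
      server.close(scannerId);    // always release the server-side scanner
    }
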
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.RowResult;
 
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;

@@ -149,8 +150,8 @@ abstract class BaseScanner extends Chore implements HConstants {
 
     // Array to hold list of split parents found. Scan adds to list. After
     // scan we go check if parents can be removed.
-    Map<HRegionInfo, SortedMap<Text, byte[]>> splitParents =
-      new HashMap<HRegionInfo, SortedMap<Text, byte[]>>();
+    Map<HRegionInfo, RowResult> splitParents =
+      new HashMap<HRegionInfo, RowResult>();
     try {
       regionServer = master.connection.getHRegionConnection(region.getServer());
       scannerId =

@@ -159,22 +160,18 @@ abstract class BaseScanner extends Chore implements HConstants {
 
     int numberOfRegionsFound = 0;
     while (true) {
-      HbaseMapWritable values = regionServer.next(scannerId);
+      RowResult values = regionServer.next(scannerId);
       if (values == null || values.size() == 0) {
         break;
       }
 
-      // TODO: Why does this have to be a sorted map?
-      SortedMap<Text, byte[]> results =
-        RowMap.fromHbaseMapWritable(values).getMap();
-
-      HRegionInfo info = master.getHRegionInfo(results);
+      HRegionInfo info = master.getHRegionInfo(values);
       if (info == null) {
         continue;
       }
 
-      String serverName = Writables.bytesToString(results.get(COL_SERVER));
-      long startCode = Writables.bytesToLong(results.get(COL_STARTCODE));
+      String serverName = Writables.cellToString(values.get(COL_SERVER));
+      long startCode = Writables.cellToLong(values.get(COL_STARTCODE));
       if (LOG.isDebugEnabled()) {
         LOG.debug(Thread.currentThread().getName() + " regioninfo: {" +
           info.toString() + "}, server: " + serverName + ", startCode: " +

@@ -184,7 +181,7 @@ abstract class BaseScanner extends Chore implements HConstants {
       // Note Region has been assigned.
       checkAssigned(info, serverName, startCode);
       if (isSplitParent(info)) {
-        splitParents.put(info, results);
+        splitParents.put(info, values);
       }
       numberOfRegionsFound += 1;
     }

@@ -216,8 +213,7 @@ abstract class BaseScanner extends Chore implements HConstants {
     // Scan is finished. Take a look at split parents to see if any we can
     // clean up.
     if (splitParents.size() > 0) {
-      for (Map.Entry<HRegionInfo, SortedMap<Text, byte[]>> e:
-          splitParents.entrySet()) {
+      for (Map.Entry<HRegionInfo, RowResult> e : splitParents.entrySet()) {
         HRegionInfo hri = e.getKey();
         cleanupSplits(region.getRegionName(), regionServer, hri, e.getValue());
       }

@@ -252,8 +248,8 @@ abstract class BaseScanner extends Chore implements HConstants {
    * @throws IOException
    */
   private boolean cleanupSplits(final Text metaRegionName,
-    final HRegionInterface srvr, final HRegionInfo parent,
-    SortedMap<Text, byte[]> rowContent)
+      final HRegionInterface srvr, final HRegionInfo parent,
+      RowResult rowContent)
   throws IOException {
     boolean result = false;
 

@@ -295,11 +291,11 @@ abstract class BaseScanner extends Chore implements HConstants {
    */
   protected boolean hasReferences(final Text metaRegionName,
     final HRegionInterface srvr, final Text parent,
-    SortedMap<Text, byte[]> rowContent, final Text splitColumn)
+    RowResult rowContent, final Text splitColumn)
   throws IOException {
     boolean result = false;
     HRegionInfo split =
-      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn));
+      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn).getValue());
     if (split == null) {
       return result;
     }

@@ -46,6 +46,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.ipc.HbaseRPC;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.InfoServer;

@@ -614,18 +616,16 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     long scannerid = srvr.openScanner(metaRegionName, COL_REGIONINFO_ARRAY,
         tableName, System.currentTimeMillis(), null);
     try {
-      HbaseMapWritable data = srvr.next(scannerid);
+      RowResult data = srvr.next(scannerid);
 
       // Test data and that the row for the data is for our table. If table
       // does not exist, scanner will return row after where our table would
       // be inserted if it exists so look for exact match on table name.
       if (data != null && data.size() > 0) {
-        for (Writable k: data.keySet()) {
-          if (HRegionInfo.getTableNameFromRegionName(
-              ((HStoreKey) k).getRow()).equals(tableName)) {
-            // Then a region for this table already exists. Ergo table exists.
-            throw new TableExistsException(tableName.toString());
-          }
+        if (HRegionInfo.getTableNameFromRegionName(
+            data.getRow()).equals(tableName)) {
+          // Then a region for this table already exists. Ergo table exists.
+          throw new TableExistsException(tableName.toString());
         }
       }
     } finally {

@@ -697,15 +697,15 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
    * @return Null or found HRegionInfo.
    * @throws IOException
    */
-  HRegionInfo getHRegionInfo(final Map<Text, byte[]> map)
+  HRegionInfo getHRegionInfo(final Map<Text, Cell> map)
   throws IOException {
-    byte [] bytes = map.get(COL_REGIONINFO);
-    if (bytes == null) {
+    Cell regioninfo = map.get(COL_REGIONINFO);
+    if (regioninfo == null) {
       LOG.warn(COL_REGIONINFO.toString() + " is empty; has keys: " +
         map.keySet().toString());
       return null;
     }
-    return (HRegionInfo)Writables.getWritable(bytes, new HRegionInfo());
+    return (HRegionInfo)Writables.getWritable(regioninfo.getValue(), new HRegionInfo());
   }
 
   /*

@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Instantiated when a server's lease has expired, meaning it has crashed.

@@ -97,7 +98,7 @@ class ProcessServerShutdown extends RegionServerOperation {
 
     try {
       while (true) {
-        HbaseMapWritable values = null;
+        RowResult values = null;
         try {
          values = server.next(scannerId);
        } catch (IOException e) {

@@ -108,10 +109,9 @@ class ProcessServerShutdown extends RegionServerOperation {
        if (values == null || values.size() == 0) {
          break;
        }
-        // TODO: Why does this have to be a sorted map?
-        RowMap rm = RowMap.fromHbaseMapWritable(values);
-        Text row = rm.getRow();
-        SortedMap<Text, byte[]> map = rm.getMap();
+
+        Text row = values.getRow();
+
        if (LOG.isDebugEnabled() && row != null) {
          LOG.debug("shutdown scanner looking at " + row.toString());
        }

@@ -121,7 +121,7 @@ class ProcessServerShutdown extends RegionServerOperation {
        // missed edits in hlog because hdfs does not do write-append).
        String serverName;
        try {
-          serverName = Writables.bytesToString(map.get(COL_SERVER));
+          serverName = Writables.cellToString(values.get(COL_SERVER));
        } catch (UnsupportedEncodingException e) {
          LOG.error("Server name", e);
          break;

@@ -133,7 +133,7 @@ class ProcessServerShutdown extends RegionServerOperation {
        }
 
        // Bingo! Found it.
-        HRegionInfo info = master.getHRegionInfo(map);
+        HRegionInfo info = master.getHRegionInfo(values);
        if (info == null) {
          continue;
        }

@@ -35,6 +35,8 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Abstract base class for operations that need to examine all HRegionInfo

@@ -95,19 +97,19 @@ abstract class TableOperation implements HConstants {
 
     try {
       while (true) {
-        HbaseMapWritable values = server.next(scannerId);
+        RowResult values = server.next(scannerId);
         if(values == null || values.size() == 0) {
           break;
         }
-        RowMap rm = RowMap.fromHbaseMapWritable(values);
-        SortedMap<Text, byte[]> map = rm.getMap();
-        HRegionInfo info = this.master.getHRegionInfo(map);
+        HRegionInfo info = this.master.getHRegionInfo(values);
         if (info == null) {
           throw new IOException(COL_REGIONINFO + " not found on " +
-            rm.getRow());
+            values.getRow());
         }
-        String serverName = Writables.bytesToString(map.get(COL_SERVER));
-        long startCode = Writables.bytesToLong(map.get(COL_STARTCODE));
+        String serverName =
+          Writables.cellToString(values.get(COL_SERVER));
+        long startCode =
+          Writables.cellToLong(values.get(COL_STARTCODE));
         if (info.getTableDesc().getName().compareTo(tableName) > 0) {
           break; // Beyond any more entries for this table
         }

@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchOperation;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;

@@ -1204,7 +1205,8 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public HScannerInterface getScanner(Text[] cols, Text firstRow,
-    long timestamp, RowFilterInterface filter) throws IOException {
+    long timestamp, RowFilterInterface filter)
+  throws IOException {
     lock.readLock().lock();
     try {
       if (this.closed.get()) {

@@ -1639,7 +1641,7 @@ public class HRegion implements HConstants {
     /** Create an HScanner with a handle on many HStores. */
     @SuppressWarnings("unchecked")
     HScanner(Text[] cols, Text firstRow, long timestamp, HStore[] stores,
-        RowFilterInterface filter)
+      RowFilterInterface filter)
     throws IOException {
       this.scanners = new HInternalScannerInterface[stores.length];
       try {

@@ -1756,7 +1758,6 @@ public class HRegion implements HConstants {
       return moreToFollow;
     }
 
-
     /** Shut down a single scanner */
     void closeScanner(int i) {
       try {

@@ -68,6 +68,7 @@ import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.BatchUpdate;
 import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.RowResult;
 import org.apache.hadoop.hbase.io.HbaseMapWritable;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;

@@ -1006,8 +1007,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   /** {@inheritDoc} */
-  public HbaseMapWritable next(final long scannerId) throws IOException {
-
+  public RowResult next(final long scannerId) throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {

@@ -1024,8 +1024,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       TreeMap<Text, byte []> results = new TreeMap<Text, byte []>();
       while (s.next(key, results)) {
         for(Map.Entry<Text, byte []> e: results.entrySet()) {
-          values.put(new HStoreKey(key.getRow(), e.getKey(), key.getTimestamp()),
-            new ImmutableBytesWritable(e.getValue()));
+          values.put(e.getKey(), new Cell(e.getValue(), key.getTimestamp()));
         }
 
         if(values.size() > 0) {

@@ -1036,8 +1035,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         // No data for this row, go get another.
         results.clear();
       }
-      return values;
-
+      return new RowResult(key.getRow(), values);
     } catch (IOException e) {
       checkFileSystem();
       throw e;

@@ -1064,8 +1062,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** {@inheritDoc} */
   public long openScanner(Text regionName, Text[] cols, Text firstRow,
-      final long timestamp, final RowFilterInterface filter)
-    throws IOException {
+    final long timestamp, final RowFilterInterface filter)
+  throws IOException {
     checkOpen();
     requestCount.incrementAndGet();
     try {

@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.io.Cell;
 
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * HStore maintains a bunch of data files. It is responsible for maintaining

@@ -2252,7 +2252,8 @@ public class HStore implements HConstants {
     /** Create an Scanner with a handle on the memcache and HStore files. */
     @SuppressWarnings("unchecked")
     HStoreScanner(Text[] targetCols, Text firstRow, long timestamp,
-      RowFilterInterface filter) throws IOException {
+      RowFilterInterface filter)
+    throws IOException {
 
       this.dataFilter = filter;
       if (null != dataFilter) {

@@ -2454,7 +2455,6 @@ public class HStore implements HConstants {
 
       return moreToFollow;
     }
 
-
     /** Shut down a single scanner */
     void closeScanner(int i) {

@@ -29,6 +29,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.hbase.io.Cell;
 
 /**
  * Utility class with methods for manipulating Writable objects

@@ -112,6 +113,19 @@ public class Writables {
       (HRegionInfo)null: getHRegionInfo(bytes);
   }
 
+  /**
+   * @param cell Cell object containing the serialized HRegionInfo
+   * @return A HRegionInfo instance built out of passed <code>cell</code>.
+   * @throws IOException
+   */
+  public static HRegionInfo getHRegionInfo(final Cell cell) throws IOException {
+    if (cell == null) {
+      return null;
+    } else {
+      return getHRegionInfo(cell.getValue());
+    }
+  }
+
   /**
    * Copy one Writable to another. Copies bytes using data streams.
    * @param src Source Writable

@@ -184,4 +198,22 @@ public class Writables {
     }
     return new String(bytes, HConstants.UTF8_ENCODING);
   }
+
+  public static String cellToString(Cell c)
+  throws UnsupportedEncodingException {
+    if (c == null) {
+      return "";
+    } else {
+      return bytesToString(c.getValue());
+    }
+  }
+
+  public static long cellToLong(Cell c)
+  throws IOException {
+    if (c == null) {
+      return 0;
+    } else {
+      return bytesToLong(c.getValue());
+    }
+  }
 }

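Taken together these helpers make the null case safe at call sites: a missing meta column yields an empty string, zero, or null instead of a NullPointerException. A usage sketch mirroring the master and client code above (values is a RowResult fetched from a meta scanner):

    String serverName = Writables.cellToString(values.get(COL_SERVER));      // "" if column absent
    long startCode = Writables.cellToLong(values.get(COL_STARTCODE));        // 0 if column absent
    HRegionInfo info = Writables.getHRegionInfo(values.get(COL_REGIONINFO)); // null if column absent
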
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
 
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.io.RowResult;
 
 /**
  * Additional scanner tests.

@@ -374,26 +375,19 @@ public class DisabledTestScanner2 extends HBaseClusterTestCase {
         HConstants.COLUMN_FAMILY_ARRAY, new Text(),
         System.currentTimeMillis(), null);
       while (true) {
-        TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
-        HbaseMapWritable values = regionServer.next(scannerId);
+        RowResult values = regionServer.next(scannerId);
         if (values == null || values.size() == 0) {
           break;
         }
 
-        for (Map.Entry<Writable, Writable> e: values.entrySet()) {
-          HStoreKey k = (HStoreKey) e.getKey();
-          results.put(k.getColumn(),
-            ((ImmutableBytesWritable) e.getValue()).get());
-        }
-
         HRegionInfo info = (HRegionInfo) Writables.getWritable(
-          results.get(HConstants.COL_REGIONINFO), new HRegionInfo());
+          values.get(HConstants.COL_REGIONINFO).getValue(), new HRegionInfo());
 
-        byte[] bytes = results.get(HConstants.COL_SERVER);
-        String serverName = Writables.bytesToString(bytes);
+        String serverName =
+          Writables.cellToString(values.get(HConstants.COL_SERVER));
 
         long startCode =
-          Writables.bytesToLong(results.get(HConstants.COL_STARTCODE));
+          Writables.cellToLong(values.get(HConstants.COL_STARTCODE));
 
         LOG.info(Thread.currentThread().getName() + " scanner: "
           + Long.valueOf(scannerId) + ": regioninfo: {" + info.toString()

@@ -67,7 +67,7 @@ public class TestGet extends HBaseTestCase {
     for(Iterator<Text> i = values.keySet().iterator(); i.hasNext(); ) {
       Text column = i.next();
       if (column.equals(HConstants.COL_SERVER)) {
-        String server = Writables.bytesToString(values.get(column).getValue());
+        String server = Writables.cellToString(values.get(column));
         assertEquals(expectedServer, server);
         LOG.info(server);
       }