Javadoc, copyright, static analysis cleanup. Bug fix in HConnectionManager.java, code removal in Put.java.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@920548 13f79535-47bb-0310-9956-ffa450edef68
Parent: 1ccce74312
Commit: 81d947ab3f

.gitignore
@@ -9,3 +9,4 @@
 /contrib/stargate/target/
 /contrib/transactional/target/
 /core/target/
+*.iml

Delete.java
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -20,6 +20,11 @@
 package org.apache.hadoop.hbase.client;

+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -28,11 +33,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;

-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.util.Bytes;
-
 /**
  * Used to perform Delete operations on a single row.
  * <p>
@@ -40,15 +40,15 @@ import org.apache.hadoop.hbase.util.Bytes;
  * to delete. To further define the scope of what to delete, perform
  * additional methods as outlined below.
  * <p>
- * To delete specific families, execute {@link #deleteFamily(byte []) deleteFamily}
+ * To delete specific families, execute {@link #deleteFamily(byte[]) deleteFamily}
  * for each family to delete.
  * <p>
  * To delete multiple versions of specific columns, execute
- * {@link #deleteColumns(byte [],byte []) deleteColumns}
+ * {@link #deleteColumns(byte[], byte[]) deleteColumns}
  * for each column to delete.
  * <p>
  * To delete specific versions of specific columns, execute
- * {@link #deleteColumn(byte [],byte [],long) deleteColumn}
+ * {@link #deleteColumn(byte[], byte[], long) deleteColumn}
  * for each column version to delete.
  * <p>
  * Specifying timestamps, deleteFamily and deleteColumns will delete all
@@ -142,6 +142,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * Overrides previous calls to deleteColumn and deleteColumns for the
    * specified family.
    * @param family family name
+   * @return this for invocation chaining
    */
   public Delete deleteFamily(byte [] family) {
     this.deleteFamily(family, HConstants.LATEST_TIMESTAMP);
@@ -156,6 +157,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * specified family.
    * @param family family name
    * @param timestamp maximum version timestamp
+   * @return this for invocation chaining
    */
   public Delete deleteFamily(byte [] family, long timestamp) {
     List<KeyValue> list = familyMap.get(family);
@@ -173,6 +175,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * Delete all versions of the specified column.
    * @param family family name
    * @param qualifier column qualifier
+   * @return this for invocation chaining
    */
   public Delete deleteColumns(byte [] family, byte [] qualifier) {
     this.deleteColumns(family, qualifier, HConstants.LATEST_TIMESTAMP);
@@ -185,6 +188,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * @param family family name
    * @param qualifier column qualifier
    * @param timestamp maximum version timestamp
+   * @return this for invocation chaining
    */
   public Delete deleteColumns(byte [] family, byte [] qualifier, long timestamp) {
     List<KeyValue> list = familyMap.get(family);
@@ -204,6 +208,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * the fetched cells timestamp.
    * @param family family name
    * @param qualifier column qualifier
+   * @return this for invocation chaining
    */
   public Delete deleteColumn(byte [] family, byte [] qualifier) {
     this.deleteColumn(family, qualifier, HConstants.LATEST_TIMESTAMP);
@@ -215,6 +220,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * @param family family name
    * @param qualifier column qualifier
    * @param timestamp version timestamp
+   * @return this for invocation chaining
    */
   public Delete deleteColumn(byte [] family, byte [] qualifier, long timestamp) {
     List<KeyValue> list = familyMap.get(family);
@@ -351,6 +357,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * @param column colon-delimited family and qualifier
    * @param timestamp maximum version timestamp
    * @deprecated use {@link #deleteColumn(byte[], byte[], long)} instead
+   * @return this for invocation chaining
    */
   public Delete deleteColumns(byte [] column, long timestamp) {
     byte [][] parts = KeyValue.parseColumn(column);
@@ -363,6 +370,7 @@ public class Delete implements Writable, Row, Comparable<Row> {
    * <code>family:qualifier</code> notation.
    * @param column colon-delimited family and qualifier
    * @deprecated use {@link #deleteColumn(byte[], byte[])} instead
+   * @return this for invocation chaining
    */
   public Delete deleteColumn(byte [] column) {
     byte [][] parts = KeyValue.parseColumn(column);
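
Note on the Delete API documented above: deleteFamily, deleteColumns and deleteColumn narrow the scope from a whole family down to a single cell version. A minimal usage sketch; the table, row, family and qualifier names are invented for illustration and are not part of this commit:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {
  // Sketch only: all names below are placeholders.
  public static void deleteSomeCells(HTable table) throws IOException {
    Delete d = new Delete(Bytes.toBytes("row1"));
    d.deleteFamily(Bytes.toBytes("colfam1"));                           // every cell in the family
    d.deleteColumns(Bytes.toBytes("colfam2"), Bytes.toBytes("q1"));     // all versions of colfam2:q1
    d.deleteColumn(Bytes.toBytes("colfam2"), Bytes.toBytes("q2"), 42L); // only the version at timestamp 42
    table.delete(d);
  }
}
```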

Get.java
@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,6 +19,14 @@
  */
 package org.apache.hadoop.hbase.client;

+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -28,15 +36,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;

-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.io.HbaseObjectWritable;
-import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactories;
-
 /**
  * Used to perform Get operations on a single row.
  * <p>
@@ -138,6 +137,7 @@ public class Get implements Writable {
    * @param minStamp minimum timestamp value, inclusive
    * @param maxStamp maximum timestamp value, exclusive
    * @throws IOException if invalid time range
+   * @return this for invocation chaining
    */
   public Get setTimeRange(long minStamp, long maxStamp)
   throws IOException {
@@ -148,6 +148,7 @@ public class Get implements Writable {
   /**
    * Get versions of columns with the specified timestamp.
    * @param timestamp version timestamp
+   * @return this for invocation chaining
    */
   public Get setTimeStamp(long timestamp) {
     try {
@@ -160,6 +161,7 @@ public class Get implements Writable {

   /**
    * Get all available versions.
+   * @return this for invocation chaining
    */
   public Get setMaxVersions() {
     this.maxVersions = Integer.MAX_VALUE;
@@ -170,6 +172,7 @@ public class Get implements Writable {
    * Get up to the specified number of versions of each column.
    * @param maxVersions maximum versions for each column
    * @throws IOException if invalid number of versions
+   * @return this for invocation chaining
    */
   public Get setMaxVersions(int maxVersions) throws IOException {
     if(maxVersions <= 0) {
@@ -184,6 +187,7 @@ public class Get implements Writable {
    * Only {@link Filter#filterKeyValue(KeyValue)} is called AFTER all tests
    * for ttl, column match, deletes and max versions have been run.
    * @param filter filter to run on the server
+   * @return this for invocation chaining
    */
   public Get setFilter(Filter filter) {
     this.filter = filter;
@@ -280,9 +284,10 @@ public class Get implements Writable {
     sb.append("row=");
     sb.append(Bytes.toString(this.row));
     sb.append(", maxVersions=");
-    sb.append("" + this.maxVersions);
+    sb.append("").append(this.maxVersions);
     sb.append(", timeRange=");
-    sb.append("[" + this.tr.getMin() + "," + this.tr.getMax() + ")");
+    sb.append("[").append(this.tr.getMin()).append(",");
+    sb.append(this.tr.getMax()).append(")");
     sb.append(", families=");
     if(this.familyMap.size() == 0) {
       sb.append("ALL");
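
The toString() change in the hunk above replaces string concatenation inside append() with chained append() calls, so no intermediate String is built per call. The pattern in isolation; this is a standalone sketch, not the HBase code:

```java
public class AppendExample {
  public static void main(String[] args) {
    long min = 0L, max = 100L;
    StringBuilder sb = new StringBuilder();
    // Before: sb.append("[" + min + "," + max + ")") first builds a
    // throwaway String via implicit concatenation, then appends it.
    // After: each piece is written straight into the builder's buffer.
    sb.append("[").append(min).append(",").append(max).append(")");
    System.out.println(sb); // prints [0,100)
  }
}
```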
@@ -398,19 +403,22 @@ public class Get implements Writable {
     }
   }

-   /**
+  /**
    * Adds an array of columns specified the old format, family:qualifier.
    * <p>
    * Overrides previous calls to addFamily for any families in the input.
    * @param columns array of columns, formatted as <pre>family:qualifier</pre>
    * @deprecated issue multiple {@link #addColumn(byte[], byte[])} instead
+   * @return this for invocation chaining
    */
+  @SuppressWarnings({"deprecation"})
   public Get addColumns(byte [][] columns) {
     if (columns == null) return this;
-    for(int i = 0; i < columns.length; i++) {
+    for (byte[] column : columns) {
       try {
-        addColumn(columns[i]);
-      } catch(Exception e) {}
+        addColumn(column);
+      } catch (Exception ignored) {
+      }
     }
     return this;
   }
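
Most of the "@return this for invocation chaining" lines added in this file document methods that return the Get itself, which is what makes fluent call chaining work. A sketch; the row, timestamps and family are invented:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.util.Bytes;

public class GetExample {
  // Compiles only because setTimeRange, setMaxVersions and addFamily
  // all return the Get they were called on.
  public static Get buildGet() throws IOException {
    return new Get(Bytes.toBytes("row1"))
        .setTimeRange(0L, 1000000L) // [minStamp, maxStamp)
        .setMaxVersions(3)
        .addFamily(Bytes.toBytes("colfam1"));
  }
}
```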

HBaseAdmin.java
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,10 +19,6 @@
  */
 package org.apache.hadoop.hbase.client;

-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -47,6 +43,10 @@ import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RemoteException;

+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableMap;
+
 /**
  * Provides administrative functions for HBase
  */
@@ -63,7 +63,7 @@ public class HBaseAdmin {
    * Constructor
    *
    * @param conf Configuration object
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public HBaseAdmin(Configuration conf) throws MasterNotRunningException {
     this.connection = HConnectionManager.getConnection(conf);
@@ -80,7 +80,7 @@ public class HBaseAdmin {

   /**
    * @return proxy connection to master server for this instance
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public HMasterInterface getMaster() throws MasterNotRunningException{
     return this.connection.getMaster();
@@ -94,7 +94,7 @@ public class HBaseAdmin {
   /**
    * @param tableName Table to check.
    * @return True if table exists already.
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public boolean tableExists(final String tableName)
   throws MasterNotRunningException {
@@ -104,7 +104,7 @@ public class HBaseAdmin {
   /**
    * @param tableName Table to check.
    * @return True if table exists already.
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public boolean tableExists(final byte [] tableName)
   throws MasterNotRunningException {
@@ -122,7 +122,7 @@ public class HBaseAdmin {
    * Right now, it only exists as part of the META table's region info.
    *
    * @return - returns an array of HTableDescriptors
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HTableDescriptor[] listTables() throws IOException {
     return this.connection.listTables();
@@ -133,7 +133,7 @@ public class HBaseAdmin {
    * Method for getting the tableDescriptor
    * @param tableName as a byte []
    * @return the tableDescriptor
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HTableDescriptor getTableDescriptor(final byte [] tableName)
   throws IOException {
@@ -158,7 +158,7 @@ public class HBaseAdmin {
    * @throws TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence
    * and attempt-at-creation).
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void createTable(HTableDescriptor desc)
   throws IOException {
@@ -195,7 +195,7 @@ public class HBaseAdmin {
    * @throws TableExistsException if table already exists (If concurrent
    * threads, the table may have been created between test-for-existence
    * and attempt-at-creation).
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void createTableAsync(HTableDescriptor desc)
   throws IOException {
@@ -215,7 +215,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of table to delete
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void deleteTable(final String tableName) throws IOException {
     deleteTable(Bytes.toBytes(tableName));
@@ -226,7 +226,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of table to delete
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void deleteTable(final byte [] tableName) throws IOException {
     if (this.master == null) {
@@ -257,10 +257,9 @@ public class HBaseAdmin {
           break;
         }
         boolean found = false;
-        for (int i = 0; i < values.length; i++) {
-          Result r = values[i];
+        for (Result r : values) {
           NavigableMap<byte[], byte[]> infoValues =
-            r.getFamilyMap(HConstants.CATALOG_FAMILY);
+              r.getFamilyMap(HConstants.CATALOG_FAMILY);
           for (Map.Entry<byte[], byte[]> e : infoValues.entrySet()) {
             if (Bytes.equals(e.getKey(), HConstants.REGIONINFO_QUALIFIER)) {
               info = (HRegionInfo) Writables.getWritable(e.getValue(), info);
@@ -310,7 +309,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of the table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void enableTable(final String tableName) throws IOException {
     enableTable(Bytes.toBytes(tableName));
@@ -321,7 +320,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of the table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void enableTable(final byte [] tableName) throws IOException {
     if (this.master == null) {
@@ -366,7 +365,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void disableTable(final String tableName) throws IOException {
     disableTable(Bytes.toBytes(tableName));
@@ -378,7 +377,7 @@ public class HBaseAdmin {
    * Synchronous operation.
    *
    * @param tableName name of table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void disableTable(final byte [] tableName) throws IOException {
     if (this.master == null) {
@@ -419,7 +418,7 @@ public class HBaseAdmin {
   /**
    * @param tableName name of table to check
    * @return true if table is on-line
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableEnabled(String tableName) throws IOException {
     return isTableEnabled(Bytes.toBytes(tableName));
@@ -427,7 +426,7 @@ public class HBaseAdmin {
   /**
    * @param tableName name of table to check
    * @return true if table is on-line
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableEnabled(byte[] tableName) throws IOException {
     return connection.isTableEnabled(tableName);
@@ -436,7 +435,7 @@ public class HBaseAdmin {
   /**
    * @param tableName name of table to check
    * @return true if table is off-line
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableDisabled(byte[] tableName) throws IOException {
     return connection.isTableDisabled(tableName);
@@ -445,7 +444,7 @@ public class HBaseAdmin {
   /**
    * @param tableName name of table to check
    * @return true if all regions of the table are available
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableAvailable(byte[] tableName) throws IOException {
     return connection.isTableAvailable(tableName);
@@ -454,7 +453,7 @@ public class HBaseAdmin {
   /**
    * @param tableName name of table to check
    * @return true if all regions of the table are available
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableAvailable(String tableName) throws IOException {
     return connection.isTableAvailable(Bytes.toBytes(tableName));
@@ -466,7 +465,7 @@ public class HBaseAdmin {
    *
    * @param tableName name of the table to add column to
    * @param column column descriptor of column to be added
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void addColumn(final String tableName, HColumnDescriptor column)
   throws IOException {
@@ -479,7 +478,7 @@ public class HBaseAdmin {
    *
    * @param tableName name of the table to add column to
    * @param column column descriptor of column to be added
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void addColumn(final byte [] tableName, HColumnDescriptor column)
   throws IOException {
@@ -500,7 +499,7 @@ public class HBaseAdmin {
    *
    * @param tableName name of table
    * @param columnName name of column to be deleted
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void deleteColumn(final String tableName, final String columnName)
   throws IOException {
@@ -513,7 +512,7 @@ public class HBaseAdmin {
    *
    * @param tableName name of table
    * @param columnName name of column to be deleted
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void deleteColumn(final byte [] tableName, final byte [] columnName)
   throws IOException {
@@ -535,7 +534,7 @@ public class HBaseAdmin {
    * @param tableName name of table
    * @param columnName name of column to be modified
    * @param descriptor new column descriptor to use
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void modifyColumn(final String tableName, final String columnName,
       HColumnDescriptor descriptor)
@@ -551,7 +550,7 @@ public class HBaseAdmin {
    * @param tableName name of table
    * @param columnName name of column to be modified
    * @param descriptor new column descriptor to use
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void modifyColumn(final byte [] tableName, final byte [] columnName,
       HColumnDescriptor descriptor)
@@ -571,10 +570,10 @@ public class HBaseAdmin {
    * Close a region. For expert-admins.
    * Asynchronous operation.
    *
-   * @param regionname
+   * @param regionname region name to close
    * @param args Optional server name. Otherwise, we'll send close to the
    * server registered in .META.
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void closeRegion(final String regionname, final Object... args)
   throws IOException {
@@ -585,10 +584,10 @@ public class HBaseAdmin {
    * Close a region. For expert-admins.
    * Asynchronous operation.
    *
-   * @param regionname
+   * @param regionname region name to close
    * @param args Optional server name. Otherwise, we'll send close to the
    * server registered in .META.
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void closeRegion(final byte [] regionname, final Object... args)
   throws IOException {
@@ -598,9 +597,7 @@ public class HBaseAdmin {
     Object [] newargs = new Object[len + xtraArgsCount];
     newargs[0] = regionname;
     if(args != null) {
-      for (int i = 0; i < len; i++) {
-        newargs[i + xtraArgsCount] = args[i];
-      }
+      System.arraycopy(args, 0, newargs, xtraArgsCount, len);
     }
     modifyTable(HConstants.META_TABLE_NAME, HConstants.Modify.CLOSE_REGION,
       newargs);
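
The closeRegion hunk just above swaps a hand-rolled element-copy loop for System.arraycopy, which states the intent directly and lets the VM use its intrinsic copy. The same transformation in isolation; the values are stand-ins:

```java
public class CopyExample {
  public static void main(String[] args) {
    Object[] extra = {"server:60020"};           // stand-in for the optional varargs
    Object[] newargs = new Object[extra.length + 1];
    newargs[0] = "regionname";                   // slot 0 is reserved, as in closeRegion()
    // Replaces: for (int i = 0; i < extra.length; i++) newargs[i + 1] = extra[i];
    System.arraycopy(extra, 0, newargs, 1, extra.length);
  }
}
```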
@@ -610,8 +607,8 @@ public class HBaseAdmin {
    * Flush a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to flush
+   * @throws IOException if a remote or network exception occurs
    */
   public void flush(final String tableNameOrRegionName) throws IOException {
     flush(Bytes.toBytes(tableNameOrRegionName));
@@ -621,8 +618,8 @@ public class HBaseAdmin {
    * Flush a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to flush
+   * @throws IOException if a remote or network exception occurs
    */
   public void flush(final byte [] tableNameOrRegionName) throws IOException {
     modifyTable(tableNameOrRegionName, HConstants.Modify.TABLE_FLUSH);
@@ -632,8 +629,8 @@ public class HBaseAdmin {
    * Compact a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to compact
+   * @throws IOException if a remote or network exception occurs
    */
   public void compact(final String tableNameOrRegionName) throws IOException {
     compact(Bytes.toBytes(tableNameOrRegionName));
@@ -643,8 +640,8 @@ public class HBaseAdmin {
    * Compact a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to compact
+   * @throws IOException if a remote or network exception occurs
    */
   public void compact(final byte [] tableNameOrRegionName) throws IOException {
     modifyTable(tableNameOrRegionName, HConstants.Modify.TABLE_COMPACT);
@@ -654,8 +651,8 @@ public class HBaseAdmin {
    * Major compact a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to major compact
+   * @throws IOException if a remote or network exception occurs
    */
   public void majorCompact(final String tableNameOrRegionName)
   throws IOException {
@@ -666,8 +663,8 @@ public class HBaseAdmin {
    * Major compact a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to major compact
+   * @throws IOException if a remote or network exception occurs
    */
   public void majorCompact(final byte [] tableNameOrRegionName)
   throws IOException {
@@ -678,8 +675,8 @@ public class HBaseAdmin {
    * Split a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table or region to split
+   * @throws IOException if a remote or network exception occurs
    */
   public void split(final String tableNameOrRegionName) throws IOException {
     split(Bytes.toBytes(tableNameOrRegionName));
@@ -689,8 +686,8 @@ public class HBaseAdmin {
    * Split a table or an individual region.
    * Asynchronous operation.
    *
-   * @param tableNameOrRegionName
-   * @throws IOException
+   * @param tableNameOrRegionName table to region to split
+   * @throws IOException if a remote or network exception occurs
    */
   public void split(final byte [] tableNameOrRegionName) throws IOException {
     modifyTable(tableNameOrRegionName, HConstants.Modify.TABLE_SPLIT);
@@ -722,7 +719,7 @@ public class HBaseAdmin {
    *
    * @param tableName name of table.
    * @param htd modified description of the table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void modifyTable(final byte [] tableName, HTableDescriptor htd)
   throws IOException {
@@ -737,7 +734,7 @@ public class HBaseAdmin {
    * region.
    * @param op table modification operation
    * @param args operation specific arguments
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public void modifyTable(final byte [] tableName, HConstants.Modify op,
       Object... args)
@@ -796,7 +793,7 @@ public class HBaseAdmin {
         } else if (args[i] instanceof String) {
           arr[i] = new ImmutableBytesWritable(Bytes.toBytes((String)args[i]));
         } else if (args[i] instanceof Boolean) {
-          arr[i] = new BooleanWritable(((Boolean)args[i]).booleanValue());
+          arr[i] = new BooleanWritable((Boolean) args[i]);
         } else {
           throw new IllegalArgumentException("Requires byte [] or " +
             "ImmutableBytesWritable, not " + args[i]);
@@ -815,7 +812,7 @@ public class HBaseAdmin {

   /**
    * Shuts down the HBase instance
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public synchronized void shutdown() throws IOException {
     if (this.master == null) {
@@ -832,7 +829,7 @@ public class HBaseAdmin {

   /**
    * @return cluster status
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public ClusterStatus getClusterStatus() throws IOException {
     if (this.master == null) {
@@ -850,8 +847,8 @@ public class HBaseAdmin {
   /**
    * Check to see if HBase is running. Throw an exception if not.
    *
-   * @param conf
-   * @throws MasterNotRunningException
+   * @param conf system configuration
+   * @throws MasterNotRunningException if a remote or network exception occurs
    */
   public static void checkHBaseAvailable(Configuration conf)
   throws MasterNotRunningException {

HConnection.java
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,11 +19,6 @@
  */
 package org.apache.hadoop.hbase.client;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ExecutorService;
-
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -32,6 +27,11 @@ import org.apache.hadoop.hbase.ipc.HMasterInterface;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
 /**
  * Cluster connection.
  * {@link HConnectionManager} manages instances of this class.
@@ -40,13 +40,13 @@ public interface HConnection {
   /**
    * Retrieve ZooKeeperWrapper used by the connection.
    * @return ZooKeeperWrapper handle being used by the connection.
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public ZooKeeperWrapper getZooKeeperWrapper() throws IOException;

   /**
    * @return proxy connection to master server for this instance
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public HMasterInterface getMaster() throws MasterNotRunningException;

@@ -57,7 +57,7 @@ public interface HConnection {
    * Checks if <code>tableName</code> exists.
    * @param tableName Table to check.
    * @return True if table exists already.
-   * @throws MasterNotRunningException
+   * @throws MasterNotRunningException if the master is not running
    */
   public boolean tableExists(final byte [] tableName)
   throws MasterNotRunningException;
@@ -66,23 +66,23 @@ public interface HConnection {
    * A table that isTableEnabled == false and isTableDisabled == false
    * is possible. This happens when a table has a lot of regions
    * that must be processed.
-   * @param tableName
+   * @param tableName table name
    * @return true if the table is enabled, false otherwise
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableEnabled(byte[] tableName) throws IOException;

   /**
-   * @param tableName
+   * @param tableName table name
    * @return true if the table is disabled, false otherwise
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableDisabled(byte[] tableName) throws IOException;

   /**
-   * @param tableName
+   * @param tableName table name
    * @return true if all regions of the table are available, false otherwise
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public boolean isTableAvailable(byte[] tableName) throws IOException;

@@ -94,14 +94,14 @@ public interface HConnection {
    * Right now, it only exists as part of the META table's region info.
    *
    * @return - returns an array of HTableDescriptors
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HTableDescriptor[] listTables() throws IOException;

   /**
-   * @param tableName
+   * @param tableName table name
    * @return table metadata
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HTableDescriptor getHTableDescriptor(byte[] tableName)
   throws IOException;
@@ -113,7 +113,7 @@ public interface HConnection {
    * @param row row key you're trying to find the region of
    * @return HRegionLocation that describes where to find the reigon in
    * question
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HRegionLocation locateRegion(final byte [] tableName,
       final byte [] row)
@@ -131,7 +131,7 @@ public interface HConnection {
    * @param row row key you're trying to find the region of
    * @return HRegionLocation that describes where to find the reigon in
    * question
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HRegionLocation relocateRegion(final byte [] tableName,
       final byte [] row)
@@ -141,7 +141,7 @@ public interface HConnection {
    * Establishes a connection to the region server at the specified address.
    * @param regionServer - the server to connect to
    * @return proxy for HRegionServer
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HRegionInterface getHRegionConnection(HServerAddress regionServer)
   throws IOException;
@@ -151,7 +151,7 @@ public interface HConnection {
    * @param regionServer - the server to connect to
    * @param getMaster - do we check if master is alive
    * @return proxy for HRegionServer
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public HRegionInterface getHRegionConnection(
       HServerAddress regionServer, boolean getMaster)
@@ -159,11 +159,11 @@ public interface HConnection {

   /**
    * Find region location hosting passed row
-   * @param tableName
+   * @param tableName table name
    * @param row Row to find.
    * @param reload If true do not use cache, otherwise bypass.
    * @return Location of row.
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   HRegionLocation getRegionLocation(byte [] tableName, byte [] row,
     boolean reload)
@@ -175,10 +175,10 @@ public interface HConnection {
    * and refinds of missing regions.
    *
    * @param <T> the type of the return value
-   * @param callable
+   * @param callable callable to run
    * @return an object of type T
-   * @throws IOException
-   * @throws RuntimeException
+   * @throws IOException if a remote or network exception occurs
+   * @throws RuntimeException other unspecified error
    */
   public <T> T getRegionServerWithRetries(ServerCallable<T> callable)
   throws IOException, RuntimeException;
@@ -187,10 +187,10 @@ public interface HConnection {
    * Pass in a ServerCallable with your particular bit of logic defined and
    * this method will pass it to the defined region server.
    * @param <T> the type of the return value
-   * @param callable
+   * @param callable callable to run
    * @return an object of type T
-   * @throws IOException
-   * @throws RuntimeException
+   * @throws IOException if a remote or network exception occurs
+   * @throws RuntimeException other unspecified error
    */
   public <T> T getRegionServerForWithoutRetries(ServerCallable<T> callable)
   throws IOException, RuntimeException;
@@ -201,7 +201,7 @@ public interface HConnection {
    * @param list A batch of Puts to process.
    * @param tableName The name of the table
    * @return Count of committed Puts. On fault, < list.size().
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public int processBatchOfRows(ArrayList<Put> list, byte[] tableName)
   throws IOException;
@@ -211,7 +211,7 @@ public interface HConnection {
    * @param list A batch of Deletes to process.
    * @return Count of committed Deletes. On fault, < list.size().
    * @param tableName The name of the table
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public int processBatchOfDeletes(List<Delete> list, byte[] tableName)
   throws IOException;

HConnectionManager.java
@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,22 +19,6 @@
  */
 package org.apache.hadoop.hbase.client;

-import java.io.IOException;
-import java.lang.reflect.UndeclaredThrowableException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -64,6 +48,22 @@ import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.KeeperState;

+import java.io.IOException;
+import java.lang.reflect.UndeclaredThrowableException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicBoolean;
+
 /**
  * A non-instantiable class that manages connections to multiple tables in
  * multiple HBase instances.
@@ -71,8 +71,8 @@ import org.apache.zookeeper.Watcher.Event.KeeperState;
  * Used by {@link HTable} and {@link HBaseAdmin}
  */
 public class HConnectionManager implements HConstants {
-  private static final Delete [] DELETE_ARRAY_TYPE = new Delete[0];
-  private static final Put [] PUT_ARRAY_TYPE = new Put[0];
+  private static final Delete [] DELETE_ARRAY_TYPE = new Delete[]{};
+  private static final Put [] PUT_ARRAY_TYPE = new Put[]{};

   // Register a shutdown hook, one that cleans up RPC and closes zk sessions.
   static {
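
DELETE_ARRAY_TYPE and PUT_ARRAY_TYPE, touched above, exist only as type tokens for List.toArray(T[]): new Delete[0] and new Delete[]{} allocate the same zero-length array, so the change is purely stylistic. How such a constant is used, as a sketch with a stand-in element type:

```java
import java.util.ArrayList;
import java.util.List;

public class ToArrayExample {
  // A shared zero-length array tells toArray which runtime type to allocate.
  private static final String[] STRING_ARRAY_TYPE = new String[]{};

  public static void main(String[] args) {
    List<String> list = new ArrayList<String>();
    list.add("a");
    // The argument is shorter than the list, so toArray allocates a fresh
    // String[1]; the shared constant itself is never written to.
    String[] arr = list.toArray(STRING_ARRAY_TYPE);
    System.out.println(arr.length); // prints 1
  }
}
```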
@@ -112,7 +112,7 @@ public class HConnectionManager implements HConstants {
   /**
    * Get the connection object for the instance specified by the configuration
    * If no current connection exists, create a new connection for that instance
-   * @param conf
+   * @param conf configuration
    * @return HConnection object for the instance specified by the configuration
    */
   public static HConnection getConnection(Configuration conf) {
@@ -130,13 +130,14 @@ public class HConnectionManager implements HConstants {

   /**
    * Delete connection information for the instance specified by configuration
-   * @param conf
-   * @param stopProxy
+   * @param conf configuration
+   * @param stopProxy stop the proxy as well
    */
   public static void deleteConnectionInfo(Configuration conf,
       boolean stopProxy) {
     synchronized (HBASE_INSTANCES) {
-      TableServers t = HBASE_INSTANCES.remove(conf);
+      Integer key = HBaseConfiguration.hashCode(conf);
+      TableServers t = HBASE_INSTANCES.remove(key);
       if (t != null) {
         t.close(stopProxy);
       }
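
The deleteConnectionInfo hunk above is the HConnectionManager bug fix named in the commit message: HBASE_INSTANCES is keyed by an Integer hash of the Configuration, so remove(conf) compiled (Map.remove takes Object) but never matched a key, and the connection entry was never dropped. A reduced sketch of the mismatch; the types below are stand-ins for Configuration and TableServers:

```java
import java.util.HashMap;
import java.util.Map;

public class KeyMismatchExample {
  public static void main(String[] args) {
    Map<Integer, String> instances = new HashMap<Integer, String>();
    Object conf = new Object();    // stands in for Configuration
    Integer key = conf.hashCode(); // stands in for HBaseConfiguration.hashCode(conf)
    instances.put(key, "connection");
    // Buggy form: Map.remove(Object) accepts the raw conf object, so this
    // compiles, but no Integer key ever equals it, and it returns null.
    System.out.println(instances.remove(conf)); // null
    // Fixed form: remove by the same Integer key used on insert.
    System.out.println(instances.remove(key));  // connection
  }
}
```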
@@ -145,7 +146,7 @@ public class HConnectionManager implements HConstants {

   /**
    * Delete information for all connections.
-   * @param stopProxy
+   * @param stopProxy stop the proxy as well
    */
   public static void deleteAllConnections(boolean stopProxy) {
     synchronized (HBASE_INSTANCES) {
@@ -166,9 +167,9 @@ public class HConnectionManager implements HConstants {
    * Get a watcher of a zookeeper connection for a given quorum address.
    * If the connection isn't established, a new one is created.
    * This acts like a multiton.
-   * @param conf
+   * @param conf configuration
    * @return ZKW watcher
-   * @throws IOException
+   * @throws IOException if a remote or network exception occurs
    */
   public static synchronized ClientZKWatcher getClientZooKeeperWatcher(
       Configuration conf) throws IOException {
@@ -193,8 +194,7 @@ public class HConnectionManager implements HConstants {

     /**
      * Takes a configuration to pass it to ZKW but won't instanciate it
-     * @param conf
-     * @throws IOException
+     * @param conf configuration
      */
     public ClientZKWatcher(Configuration conf) {
       this.conf = conf;
@@ -224,6 +224,7 @@ public class HConnectionManager implements HConstants {
     /**
      * Get this watcher's ZKW, instanciate it if necessary.
      * @return ZKW
+     * @throws java.io.IOException if a remote or network exception occurs
      */
     public synchronized ZooKeeperWrapper getZooKeeperWrapper() throws IOException {
       if(zooKeeperWrapper == null) {
@@ -296,7 +297,7 @@ public class HConnectionManager implements HConstants {
           "Unable to find region server interface " + serverClassName, e);
       }

-      this.pause = conf.getLong("hbase.client.pause", 1 * 1000);
+      this.pause = conf.getLong("hbase.client.pause", 1000);
       this.numRetries = conf.getInt("hbase.client.retries.number", 10);
       this.maxRPCAttempts = conf.getInt("hbase.client.rpc.maxattempts", 1);
       this.rpcTimeout = conf.getLong(HBASE_REGIONSERVER_LEASE_PERIOD_KEY, DEFAULT_HBASE_REGIONSERVER_LEASE_PERIOD);
@@ -327,7 +328,7 @@ public class HConnectionManager implements HConstants {
     }

     public HMasterInterface getMaster() throws MasterNotRunningException {
-      ZooKeeperWrapper zk = null;
+      ZooKeeperWrapper zk;
       try {
         zk = getZooKeeperWrapper();
       } catch (IOException e) {
@@ -409,8 +410,8 @@ public class HConnectionManager implements HConstants {
       boolean exists = false;
       try {
         HTableDescriptor[] tables = listTables();
-        for (int i = 0; i < tables.length; i++) {
-          if (Bytes.equals(tables[i].getName(), tableName)) {
+        for (HTableDescriptor table : tables) {
+          if (Bytes.equals(table.getName(), tableName)) {
             exists = true;
           }
         }
@@ -515,8 +516,8 @@ public class HConnectionManager implements HConstants {
       int rowsOffline = 0;
       byte[] startKey =
         HRegionInfo.createRegionName(tableName, null, HConstants.ZEROES);
-      byte[] endKey = null;
-      HRegionInfo currentRegion = null;
+      byte[] endKey;
+      HRegionInfo currentRegion;
       Scan scan = new Scan(startKey);
       scan.addColumn(CATALOG_FAMILY, REGIONINFO_QUALIFIER);
       int rows = this.conf.getInt("hbase.meta.scanner.caching", 100);
@@ -528,13 +529,9 @@ public class HConnectionManager implements HConstants {
       // Open scanner
       getRegionServerWithRetries(s);
       do {
-        HRegionInfo oldRegion = currentRegion;
-        if (oldRegion != null) {
-          startKey = oldRegion.getEndKey();
-        }
         currentRegion = s.getHRegionInfo();
-        Result r = null;
-        Result [] rrs = null;
+        Result r;
+        Result [] rrs;
         while ((rrs = getRegionServerWithRetries(s)) != null && rrs.length > 0) {
           r = rrs[0];
           byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
@@ -651,10 +648,11 @@ public class HConnectionManager implements HConstants {
      * Search one of the meta tables (-ROOT- or .META.) for the HRegionLocation
      * info that contains the table and row we're seeking.
      */
+    @SuppressWarnings({"ConstantConditions"})
     private HRegionLocation locateRegionInMeta(final byte [] parentTable,
       final byte [] tableName, final byte [] row, boolean useCache)
     throws IOException {
-      HRegionLocation location = null;
+      HRegionLocation location;
       // If supposed to be using the cache, then check it for a possible hit.
       // Otherwise, delete any existing cached location so it won't interfere.
       if (useCache) {
@@ -880,7 +878,7 @@ public class HConnectionManager implements HConstants {
         final byte [] tableName) {
       // find the map of cached locations for this table
       Integer key = Bytes.mapKey(tableName);
-      SoftValueSortedMap<byte [], HRegionLocation> result = null;
+      SoftValueSortedMap<byte [], HRegionLocation> result;
       synchronized (this.cachedRegionLocations) {
         result = this.cachedRegionLocations.get(key);
         // if tableLocations for this table isn't built yet, make one
@@ -1042,6 +1040,7 @@ public class HConnectionManager implements HConstants {
         HRegionInfo.ROOT_REGIONINFO, rootRegionAddress);
     }

+    @SuppressWarnings({"ConstantConditions"})
     public <T> T getRegionServerWithRetries(ServerCallable<T> callable)
     throws IOException, RuntimeException {
       List<Throwable> exceptions = new ArrayList<Throwable>();
@@ -1093,6 +1092,7 @@ public class HConnectionManager implements HConstants {
       return null;
     }

+    @SuppressWarnings({"ConstantConditions"})
     private HRegionLocation
     getRegionLocationForRowWithRetries(byte[] tableName, byte[] rowKey,
       boolean reload)
@@ -1140,29 +1140,29 @@ public class HConnectionManager implements HConstants {

     /**
      * This is the method subclasses must implement.
-     * @param currentList
-     * @param tableName
-     * @param row
+     * @param currentList current list of rows
+     * @param tableName table we are processing
+     * @param row row
      * @return Count of items processed or -1 if all.
-     * @throws IOException
-     * @throws RuntimeException
+     * @throws IOException if a remote or network exception occurs
+     * @throws RuntimeException other undefined exception
      */
-    abstract int doCall(final List<Row> currentList,
+    abstract int doCall(final List<? extends Row> currentList,
       final byte [] row, final byte [] tableName)
     throws IOException, RuntimeException;

     /**
      * Process the passed <code>list</code>.
-     * @param list
-     * @param tableName
+     * @param list list of rows to process
+     * @param tableName table we are processing
      * @return Count of how many added or -1 if all added.
-     * @throws IOException
+     * @throws IOException if a remote or network exception occurs
      */
     int process(final List<? extends Row> list, final byte[] tableName)
     throws IOException {
       byte [] region = getRegionName(tableName, list.get(0).getRow(), false);
       byte [] currentRegion = region;
-      boolean isLastRow = false;
+      boolean isLastRow;
       boolean retryOnlyOne = false;
       List<Row> currentList = new ArrayList<Row>();
       int i, tries;
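
The doCall signatures above widen List<Row> to List<? extends Row> so that process(), which receives a List<? extends Row> (in practice a List<Put> or List<Delete>), can hand its list straight through. Java generics are invariant, so without the wildcard that call would not compile. The rule in isolation, as a sketch with stand-in types:

```java
import java.util.ArrayList;
import java.util.List;

public class WildcardExample {
  interface Row {}
  static class Put implements Row {}

  // Accepts List<Put>, List<Delete>, ... any list whose elements are Rows.
  static int doCall(List<? extends Row> rows) {
    return rows.size();
  }

  public static void main(String[] args) {
    List<Put> puts = new ArrayList<Put>();
    puts.add(new Put());
    // Had the parameter been List<Row>, doCall(puts) would not compile:
    // List<Put> is not a subtype of List<Row>.
    System.out.println(doCall(puts)); // prints 1
  }
}
```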
@@ -1250,7 +1250,7 @@ public class HConnectionManager implements HConstants {
       if (list.size() > 1) Collections.sort(list);
       Batch b = new Batch(this) {
         @Override
-        int doCall(final List<Row> currentList, final byte [] row,
+        int doCall(final List<? extends Row> currentList, final byte [] row,
           final byte [] tableName)
         throws IOException, RuntimeException {
           final Put [] puts = currentList.toArray(PUT_ARRAY_TYPE);
@@ -1272,7 +1272,7 @@ public class HConnectionManager implements HConstants {
       if (list.size() > 1) Collections.sort(list);
       Batch b = new Batch(this) {
         @Override
-        int doCall(final List<Row> currentList, final byte [] row,
+        int doCall(final List<? extends Row> currentList, final byte [] row,
           final byte [] tableName)
         throws IOException, RuntimeException {
           final Delete [] deletes = currentList.toArray(DELETE_ARRAY_TYPE);
@@ -1303,6 +1303,7 @@ public class HConnectionManager implements HConstants {
       }
     }

+    @SuppressWarnings({"ConstantConditions"})
     public void processBatchOfPuts(List<Put> list,
       final byte[] tableName, ExecutorService pool) throws IOException {
       for ( int tries = 0 ; tries < numRetries && !list.isEmpty(); ++tries) {
@@ -1389,8 +1390,7 @@ public class HConnectionManager implements HConstants {
             " ms!");
         try {
           Thread.sleep(sleepTime);
-        } catch (InterruptedException e) {
-
+        } catch (InterruptedException ignored) {
         }
       }
     }
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
@ -19,21 +19,6 @@
|
|||
*/
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -52,6 +37,21 @@ import org.apache.hadoop.hbase.util.Bytes;
|
|||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.hbase.util.Writables;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
|
||||
/**
|
||||
* Used to communicate with a single HBase table.
|
||||
|
@ -79,7 +79,7 @@ public class HTable implements HTableInterface {
|
|||
* Creates an object to access a HBase table
|
||||
*
|
||||
* @param tableName name of the table
|
||||
* @throws IOException
|
||||
* @throws IOException if a remote or network exception occurs
|
||||
*/
|
||||
public HTable(final String tableName)
|
||||
throws IOException {
|
||||
|
@ -90,7 +90,7 @@ public class HTable implements HTableInterface {
|
|||
* Creates an object to access a HBase table
|
||||
*
|
||||
* @param tableName name of the table
|
||||
* @throws IOException
|
||||
* @throws IOException if a remote or network exception occurs
|
||||
*/
|
||||
public HTable(final byte [] tableName)
|
||||
throws IOException {
|
||||
|
@ -102,7 +102,7 @@ public class HTable implements HTableInterface {
|
|||
*
|
||||
* @param conf configuration object
|
||||
* @param tableName name of the table
|
||||
* @throws IOException
|
||||
* @throws IOException if a remote or network exception occurs
|
||||
*/
|
||||
public HTable(Configuration conf, final String tableName)
|
||||
throws IOException {
|
||||
|
@ -115,7 +115,7 @@ public class HTable implements HTableInterface {
|
|||
*
|
||||
* @param conf configuration object
|
||||
* @param tableName name of the table
|
||||
* @throws IOException
|
||||
* @throws IOException if a remote or network exception occurs
|
||||
*/
|
||||
public HTable(Configuration conf, final byte [] tableName)
|
||||
throws IOException {
@ -163,7 +163,7 @@ public class HTable implements HTableInterface {
 * TODO Might want to change this to public, would be nice if the number
 * of threads would automatically change when servers were added and removed
 * @return the number of region servers that are currently running
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 private int getCurrentNrHRS() throws IOException {
 HBaseAdmin admin = new HBaseAdmin(this.configuration);
@ -176,7 +176,7 @@ public class HTable implements HTableInterface {
 /**
 * @param tableName name of table to check
 * @return true if table is on-line
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public static boolean isTableEnabled(String tableName) throws IOException {
 return isTableEnabled(Bytes.toBytes(tableName));
@ -184,7 +184,7 @@ public class HTable implements HTableInterface {
 /**
 * @param tableName name of table to check
 * @return true if table is on-line
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public static boolean isTableEnabled(byte[] tableName) throws IOException {
 return isTableEnabled(HBaseConfiguration.create(), tableName);
@ -194,7 +194,7 @@ public class HTable implements HTableInterface {
 * @param conf HBaseConfiguration object
 * @param tableName name of table to check
 * @return true if table is on-line
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public static boolean isTableEnabled(Configuration conf, String tableName)
 throws IOException {
@ -205,7 +205,7 @@ public class HTable implements HTableInterface {
 * @param conf HBaseConfiguration object
 * @param tableName name of table to check
 * @return true if table is on-line
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public static boolean isTableEnabled(Configuration conf, byte[] tableName)
 throws IOException {
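
A sketch of the static check above; again "mytable" is a made-up name. Passing a Configuration explicitly avoids the fresh HBaseConfiguration.create() call performed by the overloads that take only a table name.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;

    public class TableEnabledSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        if (HTable.isTableEnabled(conf, "mytable")) {
          System.out.println("mytable is on-line");
        }
      }
    }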
@ -216,7 +216,7 @@ public class HTable implements HTableInterface {
 * Find region location hosting passed row using cached info
 * @param row Row to find.
 * @return Location of row.
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public HRegionLocation getRegionLocation(final String row)
 throws IOException {
@ -227,7 +227,7 @@ public class HTable implements HTableInterface {
 * Find region location hosting passed row using cached info
 * @param row Row to find.
 * @return Location of row.
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public HRegionLocation getRegionLocation(final byte [] row)
 throws IOException {
@ -277,7 +277,7 @@ public class HTable implements HTableInterface {
 * Gets the starting row key for every region in the currently open table
 *
 * @return Array of region starting row keys
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public byte [][] getStartKeys() throws IOException {
 return getStartEndKeys().getFirst();
@ -287,7 +287,7 @@ public class HTable implements HTableInterface {
 * Gets the ending row key for every region in the currently open table
 *
 * @return Array of region ending row keys
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public byte[][] getEndKeys() throws IOException {
 return getStartEndKeys().getSecond();
@ -298,7 +298,7 @@ public class HTable implements HTableInterface {
 * open table
 *
 * @return Pair of arrays of region starting and ending row keys
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 @SuppressWarnings("unchecked")
 public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
@ -327,7 +327,7 @@ public class HTable implements HTableInterface {
 * Get all the regions and their address for this table
 *
 * @return A map of HRegionInfo with it's server address
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 public Map<HRegionInfo, HServerAddress> getRegionsInfo() throws IOException {
 final Map<HRegionInfo, HServerAddress> regionMap =
@ -444,8 +444,9 @@ public class HTable implements HTableInterface {
 }

 /**
 * Execute a delete
 *
 * @param delete
 * @param delete the delete
 * @throws IOException
 * @since 0.20.0
 */
@ -482,7 +483,7 @@ public class HTable implements HTableInterface {
 * Commit a Put to the table.
 * <p>
 * If autoFlush is false, the update is buffered.
 * @param put
 * @param put data to put
 * @throws IOException
 * @since 0.20.0
 */
@ -494,24 +495,15 @@ public class HTable implements HTableInterface {
 * Commit a List of Puts to the table.
 * <p>
 * If autoFlush is false, the update is buffered.
 * @param puts
 * @throws IOException
 * @param puts list of puts
 * @throws IOException if a remote or network exception occurs
 * @since 0.20.0
 */
 public synchronized void put(final List<Put> puts) throws IOException {
 doPut(puts);
 }

 /**
 * Internal helper method.
 * Need to synchronize this instance to prevent race conditions on the internal
 * data structures.
 * <p>
 * If autoFlush is false, the update is buffered.
 * @param puts
 * @throws IOException
 */
 private void doPut(final List<Put> puts) throws IOException {
 private void doPut(final List<Put> puts) throws IOException {
 for (Put put : puts) {
 validatePut(put);
 writeBuffer.add(put);
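
The buffered put path above (put(List) delegating to doPut) can be exercised like this; the table and column names are hypothetical.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "mytable");
        List<Put> puts = new ArrayList<Put>();
        for (int i = 0; i < 100; i++) {
          Put put = new Put(Bytes.toBytes("row-" + i));
          put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v" + i));
          puts.add(put);
        }
        table.put(puts);       // each Put is validated and added to the write buffer
        table.flushCommits();  // push anything still buffered to the region servers
        table.close();
      }
    }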
@ -526,12 +518,12 @@ public class HTable implements HTableInterface {
 * Atomically increments a column value. If the column value already exists
 * and is not a big-endian long, this could throw an exception.<p>
 *
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @return The new value.
 * @throws IOException
 * @param row row
 * @param family column family
 * @param qualifier column qualifier
 * @param amount long amount to increment by
 * @return The new value after incrementing
 * @throws IOException if a remote or network exception occurs
 */
 public long incrementColumnValue(final byte [] row, final byte [] family,
 final byte [] qualifier, final long amount)
@ -545,14 +537,15 @@ public class HTable implements HTableInterface {
 *
 * Setting writeToWAL to false means that in a fail scenario, you will lose
 * any increments that have not been flushed.
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @param row row
 * @param family column family
 * @param qualifier column qualifier
 * @param amount long amount to increment by
 * @param writeToWAL true if increment should be applied to WAL, false if not
 * @return The new value.
 * @throws IOException
 * @throws IOException if a remote or network exception occurs
 */
 @SuppressWarnings({"ThrowableInstanceNeverThrown"})
 public long incrementColumnValue(final byte [] row, final byte [] family,
 final byte [] qualifier, final long amount, final boolean writeToWAL)
 throws IOException {
@ -563,9 +556,8 @@ public class HTable implements HTableInterface {
 npe = new NullPointerException("column is null");
 }
 if (npe != null) {
 IOException io = new IOException(
 throw new IOException(
 "Invalid arguments to incrementColumnValue", npe);
 throw io;
 }
 return connection.getRegionServerWithRetries(
 new ServerCallable<Long>(connection, tableName, row) {
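
A minimal sketch of the two incrementColumnValue overloads documented above, with hypothetical names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CounterSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        byte[] row = Bytes.toBytes("counter-row");
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("hits");
        long updated = table.incrementColumnValue(row, cf, q, 1);
        // Skipping the WAL is faster but loses unflushed increments on failure.
        long riskier = table.incrementColumnValue(row, cf, q, 1, false);
        System.out.println(updated + " then " + riskier);
        table.close();
      }
    }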
@ -580,13 +572,14 @@ public class HTable implements HTableInterface {

 /**
 * Atomically checks if a row/family/qualifier value match the expectedValue.
 * If it does, it adds the put.
 * If it does, it adds the put. If value == null, checks for non-existance
 * of the value.
 *
 * @param row
 * @param family
 * @param qualifier
 * @param row to check
 * @param family column family
 * @param qualifier column qualifier
 * @param value the expected value
 * @param put
 * @param put put to execute if value matches.
 * @throws IOException
 * @return true if the new put was execute, false otherwise
 */
@ -598,10 +591,10 @@ public class HTable implements HTableInterface {
 new ServerCallable<Boolean>(connection, tableName, row) {
 public Boolean call() throws IOException {
 return server.checkAndPut(location.getRegionInfo().getRegionName(),
 row, family, qualifier, value, put)? Boolean.TRUE: Boolean.FALSE;
 row, family, qualifier, value, put) ? Boolean.TRUE : Boolean.FALSE;
 }
 }
 ).booleanValue();
 );
 }
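
checkAndPut above is the atomic compare-and-set of this API; a sketch with hypothetical names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndPutSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        byte[] q = Bytes.toBytes("state");
        Put put = new Put(row);
        put.add(cf, q, Bytes.toBytes("busy"));
        // Applied only while the cell still holds "idle"; a null expected
        // value would instead require the cell to be absent.
        boolean applied = table.checkAndPut(row, cf, q, Bytes.toBytes("idle"), put);
        System.out.println("applied=" + applied);
        table.close();
      }
    }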

 /**
@ -611,35 +604,35 @@ public class HTable implements HTableInterface {
 *
 * This is a server-side call so it prevents any data from being transfered
 * to the client.
 * @param get
 * @param get param to check for
 * @return true if the specified Get matches one or more keys, false if not
 * @throws IOException
 */
 public boolean exists(final Get get) throws IOException {
 return connection.getRegionServerWithRetries(
 new ServerCallable<Boolean>(connection, tableName, get.getRow()) {
 public Boolean call() throws IOException {
 return Boolean.valueOf(server.
 exists(location.getRegionInfo().getRegionName(), get));
 new ServerCallable<Boolean>(connection, tableName, get.getRow()) {
 public Boolean call() throws IOException {
 return server.
 exists(location.getRegionInfo().getRegionName(), get);
 }
 }
 }
 ).booleanValue();
 );
 }

 /**
 * Commit to the table the buffer of BatchUpdate.
 * Commit to the table the buffer of Puts.
 * Called automatically in the commit methods when autoFlush is true.
 * @throws IOException
 * @throws IOException e
 */
 public void flushCommits() throws IOException {
 try {
 connection.processBatchOfPuts(writeBuffer,
 tableName, pool);
 } finally {
 // the write buffer was adjsuted by processBatchOfPuts
 // the write buffer was adjusted by processBatchOfPuts
 currentWriteBufferSize = 0;
 for (int i = 0; i < writeBuffer.size(); i++) {
 currentWriteBufferSize += writeBuffer.get(i).heapSize();
 for (Put aWriteBuffer : writeBuffer) {
 currentWriteBufferSize += aWriteBuffer.heapSize();
 }
 }
 }
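
An exists(Get) sketch — as the javadoc above notes, the check is answered server-side, so no cell data is shipped back to the client:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ExistsSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        boolean present = table.exists(new Get(Bytes.toBytes("row1")));
        System.out.println("row1 present: " + present);
        table.close();
      }
    }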
@ -653,12 +646,7 @@ public class HTable implements HTableInterface {
 flushCommits();
 }

 /**
 * Utility method that verifies Put is well formed.
 *
 * @param put
 * @throws IllegalArgumentException
 */
 // validate for well-formedness
 private void validatePut(final Put put) throws IllegalArgumentException{
 if (put.isEmpty()) {
 throw new IllegalArgumentException("No columns to insert");
@ -687,8 +675,7 @@ public class HTable implements HTableInterface {
 public RowLock call() throws IOException {
 long lockId =
 server.lockRow(location.getRegionInfo().getRegionName(), row);
 RowLock rowLock = new RowLock(row,lockId);
 return rowLock;
 return new RowLock(row,lockId);
 }
 }
 );
@ -721,8 +708,11 @@ public class HTable implements HTableInterface {
 }

 /**
 * Set if this instanciation of HTable will autoFlush
 * @param autoFlush
 * Turning off autoflush will cause operations to be batched for greater
 * efficiency in the RPC. Also see @{link #flushCommits}
 *
 * @param autoFlush flag
 * @see #flushCommits
 */
 public void setAutoFlush(boolean autoFlush) {
 this.autoFlush = autoFlush;
@ -740,8 +730,8 @@ public class HTable implements HTableInterface {
 * Set the size of the buffer in bytes.
 * If the new size is lower than the current size of data in the
 * write buffer, the buffer is flushed.
 * @param writeBufferSize
 * @throws IOException
 * @param writeBufferSize new write buffer size
 * @throws IOException e
 */
 public void setWriteBufferSize(long writeBufferSize) throws IOException {
 this.writeBufferSize = writeBufferSize;
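
The setAutoFlush/setWriteBufferSize pair above is what enables client-side batching; a sketch with hypothetical names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteBufferSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        table.setAutoFlush(false);                 // batch instead of per-put RPCs
        table.setWriteBufferSize(2 * 1024 * 1024); // flush roughly every 2 MB
        for (int i = 0; i < 10000; i++) {
          Put put = new Put(Bytes.toBytes("row-" + i));
          put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(i));
          table.put(put);
        }
        table.flushCommits(); // drain whatever is still buffered
        table.close();
      }
    }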
@ -814,10 +804,7 @@ public class HTable implements HTableInterface {
 return lastNext;
 }

 /**
 * @param endKey
 * @return Returns true if the passed region endkey.
 */
 // returns true if the passed region endKey
 private boolean checkScanStopRow(final byte [] endKey) {
 if (this.scan.getStopRow().length > 0) {
 // there is a stop row, check to see if we are past it.
@ -852,7 +839,7 @@ public class HTable implements HTableInterface {
 }

 // Where to start the next scanner
 byte [] localStartKey = null;
 byte [] localStartKey;

 // if we're at end of table, close and return false to stop iterating
 if (this.currentRegion != null) {

@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -19,10 +19,10 @@
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;

import java.io.IOException;

/**
 * Factory for creating HTable instances.
 *

@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -19,12 +19,12 @@
 */
package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor;

import java.io.IOException;
import java.util.List;

/**
 * Used to communicate with a single HBase table.
 *
@ -49,7 +49,7 @@ public interface HTableInterface {
 * Gets the table descriptor for this table.
 *
 * @return table metadata
 * @throws IOException
 * @throws IOException e
 */
 HTableDescriptor getTableDescriptor() throws IOException;

@ -65,7 +65,7 @@ public interface HTableInterface {
 *
 * @param get the Get
 * @return true if the specified Get matches one or more keys, false if not
 * @throws IOException
 * @throws IOException e
 */
 boolean exists(Get get) throws IOException;

@ -76,7 +76,7 @@ public interface HTableInterface {
 *
 * @param get the Get to fetch
 * @return the result
 * @throws IOException
 * @throws IOException e
 */
 Result get(Get get) throws IOException;

@ -87,7 +87,7 @@ public interface HTableInterface {
 * @param row row key
 * @param family Column family to look for row in
 * @return map of values
 * @throws IOException
 * @throws IOException e
 */
 Result getRowOrBefore(byte[] row, byte[] family) throws IOException;

@ -95,8 +95,8 @@ public interface HTableInterface {
 * Get a scanner on the current table as specified by the {@link Scan} object.
 *
 * @param scan a configured {@link Scan} object
 * @return scanner
 * @throws IOException
 * @return the scanner
 * @throws IOException e
 */
 ResultScanner getScanner(Scan scan) throws IOException;

@ -105,7 +105,7 @@ public interface HTableInterface {
 *
 * @param family the column family to scan
 * @return the scanner
 * @throws IOException
 * @throws IOException e
 */
 ResultScanner getScanner(byte[] family) throws IOException;

@ -115,7 +115,7 @@ public interface HTableInterface {
 * @param family the column family to scan
 * @param qualifier the column qualifier to scan
 * @return The scanner
 * @throws IOException
 * @throws IOException e
 */
 ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException;

@ -124,8 +124,8 @@ public interface HTableInterface {
 * <p>
 * If autoFlush is false, the update is buffered.
 *
 * @param put
 * @throws IOException
 * @param put data
 * @throws IOException e
 */
 void put(Put put) throws IOException;
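
A sketch of the family-only getScanner overload above; ResultScanner is Iterable, so a for-each loop works. Names are hypothetical.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilyScanSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        // Convenience overload: scans every column of family "cf"
        // without building a Scan by hand.
        ResultScanner scanner = table.getScanner(Bytes.toBytes("cf"));
        try {
          for (Result result : scanner) {
            System.out.println(Bytes.toString(result.getRow()));
          }
        } finally {
          scanner.close();
        }
        table.close();
      }
    }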
@ -134,21 +134,22 @@ public interface HTableInterface {
 * <p>
 * If autoFlush is false, the update is buffered.
 *
 * @param puts
 * @throws IOException
 * @param puts list of puts
 * @throws IOException e
 */
 void put(List<Put> puts) throws IOException;

 /**
 * Atomically checks if a row/family/qualifier value matches the expected
 * value. If it does, it adds the put.
 * value. If it does, it adds the put. If the passed value is null, the check
 * is for the lack of column (ie: non-existance)
 *
 * @param row
 * @param family
 * @param qualifier
 * @param row to check
 * @param family column family to check
 * @param qualifier column qualifier to check
 * @param value the expected value
 * @param put
 * @throws IOException
 * @param put data to put if check succeeds
 * @throws IOException e
 * @return true if the new put was executed, false otherwise
 */
 boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier,
@ -157,8 +158,8 @@ public interface HTableInterface {
 /**
 * Deletes as specified by the delete.
 *
 * @param delete
 * @throws IOException
 * @param delete a delete
 * @throws IOException e
 */
 void delete(Delete delete) throws IOException;

@ -166,7 +167,7 @@ public interface HTableInterface {
 * Bulk commit a List of Deletes to the table.
 * @param deletes List of deletes. List is modified by this method.
 * On exception holds deletes that were NOT applied.
 * @throws IOException
 * @throws IOException e
 */
 void delete(List<Delete> deletes) throws IOException;

@ -176,12 +177,12 @@ public interface HTableInterface {
 * value does not yet exist it is initialized to <code>amount</code> and
 * written to the specified column.
 *
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @param row row to increment
 * @param family column family
 * @param qualifier column qualifier
 * @param amount long amount to increment
 * @return the new value
 * @throws IOException
 * @throws IOException e
 */
 long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
 long amount) throws IOException;
@ -194,13 +195,13 @@ public interface HTableInterface {
 *
 * <p>Setting writeToWAL to false means that in a fail scenario, you will lose
 * any increments that have not been flushed.
 * @param row
 * @param family
 * @param qualifier
 * @param amount
 * @param row row to increment
 * @param family column family
 * @param qualifier column qualifier
 * @param amount long amount to increment
 * @param writeToWAL true if increment should be applied to WAL, false if not
 * @return The new value.
 * @throws IOException
 * @throws IOException e
 */
 long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier,
 long amount, boolean writeToWAL) throws IOException;
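
A sketch of the delete path declared above, with hypothetical names:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DeleteSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        Delete delete = new Delete(Bytes.toBytes("row1"));
        // deleteColumns (plural) removes all versions of the cell.
        delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        table.delete(delete);
        table.close();
      }
    }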
@ -215,14 +216,14 @@ public interface HTableInterface {
 /**
 * Flushes buffer data. Called automatically when autoFlush is true.
 *
 * @throws IOException
 * @throws IOException e
 */
 void flushCommits() throws IOException;

 /**
 * Releases held resources.
 *
 * @throws IOException
 * @throws IOException e
 */
 void close() throws IOException;

@ -231,7 +232,7 @@ public interface HTableInterface {
 *
 * @param row the row to lock
 * @return rowLock RowLock containing row and lock id
 * @throws IOException
 * @throws IOException e
 */
 RowLock lockRow(byte[] row) throws IOException;

@ -239,7 +240,7 @@ public interface HTableInterface {
 * Releases the row lock.
 *
 * @param rl the row lock to release
 * @throws IOException
 * @throws IOException e
 */
 void unlockRow(RowLock rl) throws IOException;
}
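
A lockRow/unlockRow usage sketch. The Put(row, RowLock) constructor used here is an assumption of this sketch, not something this diff shows:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowLock;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowLockSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        byte[] row = Bytes.toBytes("row1");
        RowLock lock = table.lockRow(row);
        try {
          Put put = new Put(row, lock); // assumed Put(row, RowLock) constructor
          put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
        } finally {
          table.unlockRow(lock); // always release; the lock blocks other writers
        }
        table.close();
      }
    }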
@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -19,15 +19,15 @@
 */
package org.apache.hadoop.hbase.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;

import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * A simple pool of HTable instances.<p>
 *
@ -73,7 +73,7 @@ public class HTablePool {
 * Get a reference to the specified table from the pool.<p>
 *
 * Create a new one if one is not available.
 * @param tableName
 * @param tableName table name
 * @return a reference to the specified table
 * @throws RuntimeException if there is a problem instantiating the HTable
 */
@ -98,7 +98,7 @@ public class HTablePool {
 * Get a reference to the specified table from the pool.<p>
 *
 * Create a new one if one is not available.
 * @param tableName
 * @param tableName table name
 * @return a reference to the specified table
 * @throws RuntimeException if there is a problem instantiating the HTable
 */
@ -111,7 +111,7 @@ public class HTablePool {
 *
 * If the pool already contains <i>maxSize</i> references to the table,
 * then nothing happens.
 * @param table
 * @param table table
 */
 public void putTable(HTableInterface table) {
 LinkedList<HTableInterface> queue = tables.get(Bytes.toString(table.getTableName()));
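
An HTablePool usage sketch. The (Configuration, int maxSize) constructor is assumed here; the diff itself only shows getTable and putTable:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTableInterface;
    import org.apache.hadoop.hbase.client.HTablePool;

    public class PoolSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        HTablePool pool = new HTablePool(conf, 10); // assumed (conf, maxSize) ctor
        HTableInterface table = pool.getTable("mytable");
        try {
          // ... reads and writes against the pooled table ...
        } finally {
          pool.putTable(table); // return to the pool instead of closing
        }
      }
    }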
@ -1,12 +1,12 @@
package org.apache.hadoop.hbase.client;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

/**
 * Scanner class that contains the <code>.META.</code> table scanning logic
 * and uses a Retryable scanner. Provided visitors will be called
@ -18,9 +18,9 @@ class MetaScanner implements HConstants {
 * Scans the meta table and calls a visitor on each RowResult and uses a empty
 * start row value as table name.
 *
 * @param configuration
 * @param configuration conf
 * @param visitor A custom visitor
 * @throws IOException
 * @throws IOException e
 */
 public static void metaScan(Configuration configuration,
 MetaScannerVisitor visitor)
@ -32,10 +32,10 @@ class MetaScanner implements HConstants {
 * Scans the meta table and calls a visitor on each RowResult. Uses a table
 * name to locate meta regions.
 *
 * @param configuration
 * @param visitor
 * @param tableName
 * @throws IOException
 * @param configuration config
 * @param visitor visitor object
 * @param tableName table name
 * @throws IOException e
 */
 public static void metaScan(Configuration configuration,
 MetaScannerVisitor visitor, byte[] tableName)
@ -46,7 +46,7 @@ class MetaScanner implements HConstants {
 HRegionInfo.createRegionName(tableName, null, ZEROES);

 // Scan over each meta region
 ScannerCallable callable = null;
 ScannerCallable callable;
 int rows = configuration.getInt("hbase.meta.scanner.caching", 100);
 do {
 Scan scan = new Scan(startRow).addFamily(CATALOG_FAMILY);
@ -59,10 +59,10 @@ class MetaScanner implements HConstants {
 //we have all the rows here
 Result [] rrs = connection.getRegionServerWithRetries(callable);
 if (rrs == null || rrs.length == 0 || rrs[0].size() == 0) {
 break done; //exit completely
 break; //exit completely
 }
 for (int i = 0; i < rrs.length; i++) {
 if (!visitor.processRow(rrs[i]))
 for (Result rr : rrs) {
 if (!visitor.processRow(rr))
 break done; //exit completely
 }
 //here, we didn't break anywhere. Check if we have more rows
@ -86,9 +86,9 @@ class MetaScanner implements HConstants {
 * Implementations can return false to stop the region's loop if it becomes
 * unnecessary for some reason.
 *
 * @param rowResult
 * @param rowResult result
 * @return A boolean to know if it should continue to loop in the region
 * @throws IOException
 * @throws IOException e
 */
 public boolean processRow(Result rowResult) throws IOException;
}
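
A visitor sketch for the metaScan methods above. It assumes the visitor interface is the nested MetaScannerVisitor of the package-private MetaScanner class, so the sketch has to live in the same package:

    package org.apache.hadoop.hbase.client; // MetaScanner is package-private

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MetaScanSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitor() {
          public boolean processRow(Result rowResult) throws IOException {
            System.out.println(rowResult);
            return true; // false would stop the region loop early
          }
        });
      }
    }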
@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -20,27 +20,37 @@

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.DataInput;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/**
 * Data type class for putting multiple regions worth of puts in one RPC.
 */
public class MultiPut implements Writable {
 public HServerAddress address; // client code ONLY

 // map of regions to lists of puts for that region.
 public Map<byte[], List<Put> > puts = new TreeMap<byte[], List<Put>>(Bytes.BYTES_COMPARATOR);

 /**
 * Writable constructor only.
 */
 public MultiPut() {}

 /**
 * MultiPut for putting multiple regions worth of puts in one RPC.
 * @param a address
 */
 public MultiPut(HServerAddress a) {
 address = a;
 }

@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -20,22 +20,23 @@

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.DataInput;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import java.util.TreeMap;

/**
 * Response class for MultiPut.
 */
public class MultiPutResponse implements Writable {

 public MultiPut request; // used in client code ONLY
 protected MultiPut request; // used in client code ONLY

 public Map<byte[], Integer> answers = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
 protected Map<byte[], Integer> answers = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);

 public MultiPutResponse() {}

@ -1,5 +1,5 @@
/**
 * Copyright 2007 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -20,6 +20,13 @@

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@ -29,14 +36,6 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.io.Writable;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;


/**
 * Used to perform Put operations for a single row.
@ -128,6 +127,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * @param family family name
 * @param qualifier column qualifier
 * @param value column value
 * @return this
 */
 public Put add(byte [] family, byte [] qualifier, byte [] value) {
 return add(family, qualifier, this.timestamp, value);
@ -140,6 +140,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * @param qualifier column qualifier
 * @param ts version timestamp
 * @param value column value
 * @return this
 */
 public Put add(byte [] family, byte [] qualifier, long ts, byte [] value) {
 List<KeyValue> list = getKeyValueList(family);
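
With add() now returning this (the @return additions above), cells can be chained onto one Put; a sketch with hypothetical names:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutBuilderSketch {
      public static void main(String[] args) {
        Put put = new Put(Bytes.toBytes("row1"));
        // First add uses the Put's default timestamp, second pins version 42.
        put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("latest"))
           .add(Bytes.toBytes("cf"), Bytes.toBytes("q"), 42L, Bytes.toBytes("v42"));
        System.out.println(put.isEmpty()); // false: two cells queued
      }
    }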
@ -153,7 +154,9 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * Add the specified KeyValue to this Put operation. Operation assumes that
 * the passed KeyValue is immutable and its backing array will not be modified
 * for the duration of this Put.
 * @param kv
 * @param kv individual KeyValue
 * @return this
 * @throws java.io.IOException e
 */
 public Put add(KeyValue kv) throws IOException{
 byte [] family = kv.getFamily();
@ -172,13 +175,9 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 return this;
 }

 /**
 /*
 * Create a KeyValue with this objects row key and the Put identifier.
 *
 * @param family
 * @param qualifier
 * @param ts
 * @param value
 * @return a KeyValue with this objects row key and the Put identifier.
 */
 private KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts,
@ -192,8 +191,8 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * a value assigned to the given family & qualifier.
 * Both given arguments must match the KeyValue object to return true.
 *
 * @param family
 * @param qualifier
 * @param family column family
 * @param qualifier column qualifier
 * @return returns true if the given family and qualifier already has an
 * existing KeyValue object in the family map.
 */
@ -206,9 +205,9 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * a value assigned to the given family, qualifier and timestamp.
 * All 3 given arguments must match the KeyValue object to return true.
 *
 * @param family
 * @param qualifier
 * @param ts
 * @param family column family
 * @param qualifier column qualifier
 * @param ts timestamp
 * @return returns true if the given family, qualifier and timestamp already has an
 * existing KeyValue object in the family map.
 */
@ -221,9 +220,9 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * a value assigned to the given family, qualifier and timestamp.
 * All 3 given arguments must match the KeyValue object to return true.
 *
 * @param family
 * @param qualifier
 * @param value
 * @param family column family
 * @param qualifier column qualifier
 * @param value value to check
 * @return returns true if the given family, qualifier and value already has an
 * existing KeyValue object in the family map.
 */
@ -236,10 +235,10 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * the given value assigned to the given family, qualifier and timestamp.
 * All 4 given arguments must match the KeyValue object to return true.
 *
 * @param family
 * @param qualifier
 * @param ts
 * @param value
 * @param family column family
 * @param qualifier column qualifier
 * @param ts timestamp
 * @param value value to check
 * @return returns true if the given family, qualifier timestamp and value
 * already has an existing KeyValue object in the family map.
 */
@ -247,7 +246,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 return has(family, qualifier, ts, value, false, false);
 }

 /**
 /*
 * Private method to determine if this object's familyMap contains
 * the given value assigned to the given family, qualifier and timestamp
 * respecting the 2 boolean arguments
@ -264,9 +263,14 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 private boolean has(byte [] family, byte [] qualifier, long ts, byte [] value,
 boolean ignoreTS, boolean ignoreValue) {
 List<KeyValue> list = getKeyValueList(family);
 if (list.size() == 0 ) {
 if (list.size() == 0) {
 return false;
 }
 // Boolean analysis of ignoreTS/ignoreValue.
 // T T => 2
 // T F => 3 (first is always true)
 // F T => 2
 // F F => 1
 if (!ignoreTS && !ignoreValue) {
 KeyValue kv = createPutKeyValue(family, qualifier, ts, value);
 return (list.contains(kv));
@ -277,20 +281,14 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 return true;
 }
 }
 } else if (ignoreTS) {
 } else {
 // ignoreTS is always true
 for (KeyValue kv: list) {
 if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(kv.getQualifier(), qualifier)
 && Arrays.equals(kv.getValue(), value)) {
 return true;
 }
 }
 } else {
 for (KeyValue kv: list) {
 if (Arrays.equals(kv.getFamily(), family) && Arrays.equals(
 kv.getQualifier(), qualifier)) {
 return true;
 }
 }
 }
 return false;
 }
@ -298,8 +296,8 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 /**
 * Returns a list of all KeyValue objects with matching column family and qualifier.
 *
 * @param family
 * @param qualifier
 * @param family column family
 * @param qualifier column qualifier
 * @return a list of KeyValue objects with the matching family and qualifier,
 * returns an empty list if one doesnt exist for the given family.
 */
@ -317,7 +315,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * Creates an empty list if one doesnt exist for the given column family
 * or else it returns the associated list of KeyValue objects.
 *
 * @param family
 * @param family column family
 * @return a list of KeyValue objects, returns an empty list if one doesnt exist.
 */
 private List<KeyValue> getKeyValueList(byte[] family) {
@ -538,6 +536,7 @@ public class Put implements HeapSize, Writable, Row, Comparable<Row> {
 * @param ts version timestamp
 * @param value column value
 * @deprecated use {@link #add(byte[], byte[], long, byte[])} instead
 * @return true
 */
 public Put add(byte [] column, long ts, byte [] value) {
 byte [][] parts = KeyValue.parseColumn(column);

@ -1,5 +1,5 @@
/**
 * Copyright 2008 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.RegionException;
/** Thrown when a table can not be located */
public class RegionOfflineException extends RegionException {
 private static final long serialVersionUID = 466008402L;
 /** default constructor */
 /** default constructor */
 public RegionOfflineException() {
 super();
 }

@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -20,6 +20,12 @@

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
@ -31,12 +37,6 @@ import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.SplitKeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;

/**
 * Single row result of a {@link Get} or {@link Scan} query.<p>
 *
@ -152,7 +152,7 @@ public class Result implements Writable {
 if (isEmpty()) {
 return null;
 }
 Arrays.sort(kvs, (Comparator<KeyValue>)KeyValue.COMPARATOR);
 Arrays.sort(kvs, KeyValue.COMPARATOR);
 return kvs;
 }

@ -238,6 +238,7 @@ public class Result implements Writable {
 * Map of qualifiers to values.
 * <p>
 * Returns a Map of the form: <code>Map<qualifier,value></code>
 * @param family column family to get
 * @return map of qualifiers to values
 */
 public NavigableMap<byte[], byte[]> getFamilyMap(byte [] family) {
@ -319,10 +320,7 @@ public class Result implements Writable {
 return false;
 }
 NavigableMap<Long, byte[]> versionMap = getVersionMap(qualifierMap, qualifier);
 if(versionMap == null) {
 return false;
 }
 return true;
 return versionMap != null;
 }
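
Reading a row back; getFamilyMap is documented above, and getValue(family, qualifier) is assumed to be the usual Result accessor in this API version:

    import java.io.IOException;
    import java.util.NavigableMap;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadResultSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        Result result = table.get(new Get(Bytes.toBytes("row1")));
        byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
        System.out.println(value == null ? "missing" : Bytes.toString(value));
        // qualifier -> value view of one family, per getFamilyMap above
        NavigableMap<byte[], byte[]> cf = result.getFamilyMap(Bytes.toBytes("cf"));
        System.out.println(cf == null ? 0 : cf.size());
        table.close();
      }
    }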

 /**

@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;

/**
 * Interface for client-side scanning.
 * Go to {@link HTable} to obtain instances.
@ -34,14 +32,14 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
 * Grab the next row's worth of values. The scanner will return a Result.
 * @return Result object if there is another row, null if the scanner is
 * exhausted.
 * @throws IOException
 * @throws IOException e
 */
 public Result next() throws IOException;

 /**
 * @param nbRows number of rows to return
 * @return Between zero and <param>nbRows</param> Results
 * @throws IOException
 * @throws IOException e
 */
 public Result [] next(int nbRows) throws IOException;
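
A sketch of the next() contract documented above — null signals an exhausted scanner:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;

    public class ScannerLoopSketch {
      public static void main(String[] args) throws IOException {
        HTable table = new HTable(HBaseConfiguration.create(), "mytable");
        ResultScanner scanner = table.getScanner(new Scan());
        try {
          Result row;
          while ((row = scanner.next()) != null) { // null means exhausted
            System.out.println(row);
          }
        } finally {
          scanner.close(); // ResultScanner is Closeable
        }
        table.close();
      }
    }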
@ -1,5 +1,5 @@
/**
 * Copyright 2008 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@ -15,11 +15,11 @@
 */
package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;

/**
 * Exception thrown by HTable methods when an attempt to do something (like
 * commit changes) fails after a bunch of retries.

@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@ -1,5 +1,5 @@
/**
 * Copyright 2008 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -28,8 +28,8 @@ public class RowLock {

 /**
 * Creates a RowLock from a row and lock id
 * @param row
 * @param lockId
 * @param row row to lock on
 * @param lockId the lock id
 */
 public RowLock(final byte [] row, final long lockId) {
 this.row = row;
@ -38,7 +38,7 @@ public class RowLock {

 /**
 * Creates a RowLock with only a lock id
 * @param lockId
 * @param lockId lock id
 */
 public RowLock(final long lockId) {
 this.lockId = lockId;

@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@ -20,14 +20,6 @@

package org.apache.hadoop.hbase.client;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
@ -37,6 +29,14 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;
import java.util.NavigableSet;
import java.util.TreeMap;
import java.util.TreeSet;

/**
 * Used to perform Scan operations.
 * <p>
@ -154,6 +154,7 @@ public class Scan implements Writable {
 * <p>
 * Overrides previous calls to addColumn for this family.
 * @param family family name
 * @return this
 */
 public Scan addFamily(byte [] family) {
 familyMap.remove(family);
@ -167,6 +168,7 @@ public class Scan implements Writable {
 * Overrides previous calls to addFamily for this family.
 * @param family family name
 * @param qualifier column qualifier
 * @return this
 */
 public Scan addColumn(byte [] family, byte [] qualifier) {
 NavigableSet<byte []> set = familyMap.get(family);
@ -189,6 +191,7 @@ public class Scan implements Writable {
 * @throws IOException if invalid time range
 * @see #setMaxVersions()
 * @see #setMaxVersions(int)
 * @return this
 */
 public Scan setTimeRange(long minStamp, long maxStamp)
 throws IOException {
@ -204,6 +207,7 @@ public class Scan implements Writable {
 * @param timestamp version timestamp
 * @see #setMaxVersions()
 * @see #setMaxVersions(int)
 * @return this
 */
 public Scan setTimeStamp(long timestamp) {
 try {
@ -215,8 +219,9 @@ public class Scan implements Writable {
 }

 /**
 * Set the start row.
 * @param startRow
 * Set the start row of the scan.
 * @param startRow row to start scan on, inclusive
 * @return this
 */
 public Scan setStartRow(byte [] startRow) {
 this.startRow = startRow;
@ -225,7 +230,8 @@ public class Scan implements Writable {

 /**
 * Set the stop row.
 * @param stopRow
 * @param stopRow row to end at (exclusive)
 * @return this
 */
 public Scan setStopRow(byte [] stopRow) {
 this.stopRow = stopRow;
@ -234,6 +240,7 @@ public class Scan implements Writable {

 /**
 * Get all available versions.
 * @return this
 */
 public Scan setMaxVersions() {
 this.maxVersions = Integer.MAX_VALUE;
@ -243,6 +250,7 @@ public class Scan implements Writable {
 /**
 * Get up to the specified number of versions of each column.
 * @param maxVersions maximum versions for each column
 * @return this
 */
 public Scan setMaxVersions(int maxVersions) {
 this.maxVersions = maxVersions;
@ -270,6 +278,7 @@ public class Scan implements Writable {
 /**
 * Apply the specified server-side filter when performing the Scan.
 * @param filter filter to run on the server
 * @return this
 */
 public Scan setFilter(Filter filter) {
 this.filter = filter;
@ -278,7 +287,8 @@ public class Scan implements Writable {

 /**
 * Setting the familyMap
 * @param familyMap
 * @param familyMap map of family to qualifier
 * @return this
 */
 public Scan setFamilyMap(Map<byte [], NavigableSet<byte []>> familyMap) {
 this.familyMap = familyMap;
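
All of the Scan setters above return this, which is the point of the added @return tags; configuration can therefore be chained. Row and column names are hypothetical.

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanBuilderSketch {
      public static void main(String[] args) {
        Scan scan = new Scan()
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"))
            .setStartRow(Bytes.toBytes("row-000"))  // inclusive
            .setStopRow(Bytes.toBytes("row-999"))   // exclusive
            .setMaxVersions(3);
        System.out.println(scan); // uses the toString() cleaned up below
      }
    }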
|
||||
|
@ -410,15 +420,16 @@ public class Scan implements Writable {
|
|||
sb.append(", stopRow=");
|
||||
sb.append(Bytes.toString(this.stopRow));
|
||||
sb.append(", maxVersions=");
|
||||
sb.append("" + this.maxVersions);
|
||||
sb.append(this.maxVersions);
|
||||
sb.append(", batch=");
|
||||
sb.append("" + this.batch);
|
||||
sb.append(this.batch);
|
||||
sb.append(", caching=");
|
||||
sb.append("" + this.caching);
|
||||
sb.append(this.caching);
|
||||
sb.append(", cacheBlocks=");
|
||||
sb.append("" + this.cacheBlocks);
|
||||
sb.append(this.cacheBlocks);
|
||||
sb.append(", timeRange=");
|
||||
sb.append("[" + this.tr.getMin() + "," + this.tr.getMax() + ")");
|
||||
sb.append("[").append(this.tr.getMin()).append(",");
|
||||
sb.append(this.tr.getMax()).append(")");
|
||||
sb.append(", families=");
|
||||
if(this.familyMap.size() == 0) {
|
||||
sb.append("ALL");
|
||||
|
@ -539,7 +550,7 @@ public class Scan implements Writable {
|
|||
* <p>
|
||||
* Note: It will through an error when the colon is missing.
|
||||
*
|
||||
* @param familyAndQualifier
|
||||
* @param familyAndQualifier family and qualifier
|
||||
* @return A reference to this instance.
|
||||
* @throws IllegalArgumentException When the colon is missing.
|
||||
* @deprecated use {@link #addColumn(byte[], byte[])} instead
|
||||
|
@ -561,10 +572,11 @@ public class Scan implements Writable {
|
|||
*
|
||||
* @param columns array of columns, formatted as <pre>family:qualifier</pre>
|
||||
* @deprecated issue multiple {@link #addColumn(byte[], byte[])} instead
|
||||
* @return this
|
||||
*/
|
||||
public Scan addColumns(byte [][] columns) {
|
||||
for (int i = 0; i < columns.length; i++) {
|
||||
addColumn(columns[i]);
|
||||
for (byte[] column : columns) {
|
||||
addColumn(column);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
@ -608,12 +620,12 @@ public class Scan implements Writable {
|
|||
for (byte[] qual : quals) {
|
||||
if (cs.length() > 0) cs.append(" ");
|
||||
// encode values to make parsing easier later
|
||||
cs.append(Bytes.toStringBinary(fam) + ":" + Bytes.toStringBinary(qual));
|
||||
cs.append(Bytes.toStringBinary(fam)).append(":").append(Bytes.toStringBinary(qual));
|
||||
}
|
||||
cols.append(cs);
|
||||
} else {
|
||||
// only add the family but with old style delimiter
|
||||
cols.append(Bytes.toStringBinary(fam) + ":");
|
||||
cols.append(Bytes.toStringBinary(fam)).append(":");
|
||||
}
|
||||
}
|
||||
return cols.toString();
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
|
||||
/**
|
||||
* Copyright 2008 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
@ -21,8 +20,6 @@
|
|||
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
|
@ -30,6 +27,8 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
|
|||
import org.apache.hadoop.ipc.RemoteException;
|
||||
import org.mortbay.log.Log;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
||||
/**
|
||||
* Retries scanner operations such as create, next, etc.
|
||||
|
@ -43,9 +42,9 @@ public class ScannerCallable extends ServerCallable<Result[]> {
|
|||
private int caching = 1;
|
||||
|
||||
/**
|
||||
* @param connection
|
||||
* @param tableName
|
||||
* @param scan
|
||||
* @param connection which connection
|
||||
* @param tableName table callable is on
|
||||
* @param scan the scan to execute
|
||||
*/
|
||||
public ScannerCallable (HConnection connection, byte [] tableName, Scan scan) {
|
||||
super(connection, tableName, scan.getStartRow());
|
||||
|
@ -53,7 +52,7 @@ public class ScannerCallable extends ServerCallable<Result[]> {
|
|||
}
|
||||
|
||||
/**
|
||||
* @param reload
|
||||
* @param reload force reload of server location
|
||||
* @throws IOException
|
||||
*/
|
||||
@Override
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Copyright 2008 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Copyright 2008 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
@ -20,12 +20,12 @@
|
|||
|
||||
package org.apache.hadoop.hbase.client;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
import org.apache.hadoop.hbase.ipc.HRegionInterface;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
/**
|
||||
* Abstract class that implements Callable, used by retryable actions.
|
||||
* @param <T> the class that the ServerCallable handles
|
||||
|
@ -38,9 +38,9 @@ public abstract class ServerCallable<T> implements Callable<T> {
|
|||
protected HRegionInterface server;
|
||||
|
||||
/**
|
||||
* @param connection
|
||||
* @param tableName
|
||||
* @param row
|
||||
* @param connection connection callable is on
|
||||
* @param tableName table name callable is on
|
||||
* @param row row we are querying
|
||||
*/
|
||||
public ServerCallable(HConnection connection, byte [] tableName, byte [] row) {
|
||||
this.connection = connection;
|
||||
|
@ -51,7 +51,7 @@ public abstract class ServerCallable<T> implements Callable<T> {
|
|||
/**
|
||||
*
|
||||
* @param reload set this to true if connection should re-find the region
|
||||
* @throws IOException
|
||||
* @throws IOException e
|
||||
*/
|
||||
public void instantiateServer(boolean reload) throws IOException {
|
||||
this.location = connection.getRegionLocation(tableName, row, reload);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Copyright 2008 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
|
|||
public interface ServerConnection extends HConnection {
|
||||
/**
|
||||
* Set root region location in connection
|
||||
* @param rootRegion
|
||||
* @param rootRegion region location for root region
|
||||
*/
|
||||
public void setRootRegionLocation(HRegionLocation rootRegion);
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/**
|
||||
* Copyright 2008 The Apache Software Foundation
|
||||
* Copyright 2010 The Apache Software Foundation
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
|
@ -36,7 +36,7 @@ public class ServerConnectionManager extends HConnectionManager {
|
|||
/**
|
||||
* Get the connection object for the instance specified by the configuration
|
||||
* If no current connection exists, create a new connection for that instance
|
||||
* @param conf
|
||||
* @param conf configuration
|
||||
* @return HConnection object for the instance specified by the configuration
|
||||
*/
|
||||
public static ServerConnection getConnection(Configuration conf) {
|
||||
|
|
|
@@ -1,3 +1,23 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -9,7 +29,7 @@ import org.apache.hadoop.hbase.io.hfile.Compression;
 public class UnmodifyableHColumnDescriptor extends HColumnDescriptor {
 
   /**
-   * @param desc
+   * @param desc wrapped
    */
   public UnmodifyableHColumnDescriptor (final HColumnDescriptor desc) {
     super(desc);

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -22,7 +22,6 @@ package org.apache.hadoop.hbase.client;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-//import org.apache.hadoop.hbase.client.tableindexed.IndexSpecification;
 
 /**
  * Read-only table descriptor.

@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -17,6 +17,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
 
+/**
 Provides HBase Client
 
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -22,7 +22,7 @@ package org.apache.hadoop.hbase.filter;
 
 /**
  * A binary comparator which lexicographically compares against the specified
- * byte array using {@link Bytes#compareTo(byte[], byte[])}.
+ * byte array using {@link org.apache.hadoop.hbase.util.Bytes#compareTo(byte[], byte[])}.
  */
 public class BinaryComparator extends WritableByteArrayComparable {
 
@@ -31,7 +31,7 @@ public class BinaryComparator extends WritableByteArrayComparable {
 
   /**
    * Constructor
-   * @param value
+   * @param value value
    */
   public BinaryComparator(byte[] value) {
     super(value);

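A hedged usage sketch for BinaryComparator: lexicographically match an exact row key inside a RowFilter (the row key is illustrative):

Scan scan = new Scan();
scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
    new BinaryComparator(Bytes.toBytes("row-100"))));
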
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -34,7 +34,7 @@ public class BinaryPrefixComparator extends WritableByteArrayComparable {
 
   /**
    * Constructor
-   * @param value
+   * @param value value
    */
   public BinaryPrefixComparator(byte[] value) {
     super(value);

@@ -1,11 +1,31 @@
+/**
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.KeyValue;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.KeyValue;
 
 /**
  * Simple filter that returns first N columns on row only.
  * This filter was written to test filters in Get and as soon as it gets

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -20,14 +20,14 @@
 
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.io.HbaseObjectWritable;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
-
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.HbaseObjectWritable;
 
 /**
  * This is a generic filter to be used to filter by comparison. It takes an
  * operator (equal, greater, not equal, etc) and a byte [] comparator.
@@ -58,7 +58,7 @@ public abstract class CompareFilter implements Filter {
     /** greater than or equal to */
     GREATER_OR_EQUAL,
     /** greater than */
-    GREATER;
+    GREATER,
   }
 
   protected CompareOp compareOp;

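The CompareOp enum touched above is what drives all of the comparison filters. A small sketch, with the threshold value illustrative, keeping only cells whose value sorts at or above it:

Scan scan = new Scan();
scan.setFilter(new ValueFilter(CompareFilter.CompareOp.GREATER_OR_EQUAL,
    new BinaryComparator(Bytes.toBytes("m"))));
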
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -20,8 +20,8 @@
 
 package org.apache.hadoop.hbase.filter;
 
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.io.Writable;
 
 /**
  * Interface for row and column filters directly applied within the regionserver.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,18 +19,18 @@
  */
 package org.apache.hadoop.hbase.filter;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.HbaseObjectWritable;
 import org.apache.hadoop.io.Writable;
 
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 /**
  * Implementation of {@link Filter} that represents an ordered List of Filters
  * which will be evaluated with a specified boolean operator {@link Operator#MUST_PASS_ALL}
@@ -64,7 +64,7 @@ public class FilterList implements Filter {
    * Constructor that takes a set of {@link Filter}s. The default operator
    * MUST_PASS_ALL is assumed.
    *
-   * @param rowFilters
+   * @param rowFilters list of filters
    */
   public FilterList(final List<Filter> rowFilters) {
     this.filters = rowFilters;
@@ -111,7 +111,7 @@ public class FilterList implements Filter {
   /**
    * Add a filter.
    *
-   * @param filter
+   * @param filter another filter
    */
   public void addFilter(Filter filter) {
     this.filters.add(filter);
@@ -178,7 +178,7 @@ public class FilterList implements Filter {
         return ReturnCode.INCLUDE;
       case NEXT_ROW:
       case SKIP:
-        continue;
+        // continue;
       }
     }
   }

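A sketch of FilterList in use; MUST_PASS_ALL ANDs the member filters together while MUST_PASS_ONE ORs them (the member filters and row keys are illustrative):

List<Filter> filters = new ArrayList<Filter>();
filters.add(new PageFilter(25));
filters.add(new InclusiveStopFilter(Bytes.toBytes("row-500")));
Scan scan = new Scan();
scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, filters));
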
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -23,9 +23,9 @@ package org.apache.hadoop.hbase.filter;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.DataInput;
 
 /**
  * A Filter that stops after the given row. There is no "RowStopFilter" because
@@ -55,6 +55,7 @@ public class InclusiveStopFilter implements Filter {
 
   public boolean filterRowKey(byte[] buffer, int offset, int length) {
     if (buffer == null) {
+      //noinspection RedundantIfStatement
      if (this.stopRowKey == null) {
        return true; //filter...
      }

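A sketch of InclusiveStopFilter: scan from a start row up to and including a stop row (row keys illustrative):

Scan scan = new Scan(Bytes.toBytes("row-000"));
scan.setFilter(new InclusiveStopFilter(Bytes.toBytes("row-099")));
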
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,12 +19,12 @@
  */
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.KeyValue;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.KeyValue;
 
 /**
  * Implementation of Filter interface that limits results to a specific page
 * size. It terminates scanning once the number of filter-passed rows is >

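A sketch of PageFilter capping a scan at ten filter-passed rows. Note the limit is enforced independently on each region server, so a scan spanning regions may still need client-side trimming (table name illustrative):

HTable table = new HTable(conf, "mytable");
Scan scan = new Scan();
scan.setFilter(new PageFilter(10));
ResultScanner scanner = table.getScanner(scan);
try {
  for (Result result : scanner) {
    // process at most ~10 rows per region
  }
} finally {
  scanner.close();
}
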
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,13 +19,13 @@
  */
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.util.Bytes;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.regex.Pattern;
-
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * This comparator is for use with {@link CompareFilter} implementations, such
 * as {@link RowFilter}, {@link QualifierFilter}, and {@link ValueFilter}, for

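A sketch pairing RegexStringComparator with RowFilter to match row keys against a pattern (the regex is illustrative):

Scan scan = new Scan();
scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL,
    new RegexStringComparator("user-[0-9]+")));
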
@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -20,11 +20,6 @@
 
 package org.apache.hadoop.hbase.filter;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.KeyValue;
@@ -33,6 +28,11 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.HbaseObjectWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+
 /**
  * This filter is used to filter cells based on value. It takes a {@link CompareFilter.CompareOp}
  * operator (equal, greater, not equal, etc), and either a byte [] value or
@@ -228,6 +228,7 @@ public class SingleColumnValueFilter implements Filter {
    * If true, the entire row will be skipped if the column is not found.
    * <p>
    * If false, the row will pass if the column is not found. This is default.
+   * @param filterIfMissing flag
    */
   public void setFilterIfMissing(boolean filterIfMissing) {
     this.filterIfMissing = filterIfMissing;
@@ -238,6 +239,7 @@ public class SingleColumnValueFilter implements Filter {
    * If true, the row will be returned if only the latest version of the column
    * value matches. If false, the row will be returned if any version of the
    * column value matches. The default is true.
+   * @return return value
    */
   public boolean getLatestVersionOnly() {
     return latestVersionOnly;
@@ -248,6 +250,7 @@ public class SingleColumnValueFilter implements Filter {
    * If true, the row will be returned if only the latest version of the column
    * value matches. If false, the row will be returned if any version of the
   * column value matches. The default is true.
+   * @param latestVersionOnly flag
   */
  public void setLatestVersionOnly(boolean latestVersionOnly) {
    this.latestVersionOnly = latestVersionOnly;

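A sketch tying the two setters documented above together: return only rows whose info:status cell equals "active", skip rows missing the column, and test only the newest version (family, qualifier, and value are illustrative):

SingleColumnValueFilter filter = new SingleColumnValueFilter(
    Bytes.toBytes("info"), Bytes.toBytes("status"),
    CompareFilter.CompareOp.EQUAL, Bytes.toBytes("active"));
filter.setFilterIfMissing(true);   // default false: rows without the column pass
filter.setLatestVersionOnly(true); // default true
Scan scan = new Scan();
scan.setFilter(filter);
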
@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -22,9 +22,9 @@ package org.apache.hadoop.hbase.filter;
 
 import org.apache.hadoop.hbase.KeyValue;
 
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.DataInput;
 
 /**
  * A wrapper filter that filters an entire row if any of the KeyValue checks do

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,12 +19,12 @@
  */
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.util.Bytes;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * This comparator is for use with ColumnValueFilter, for filtering based on
 * the value of a given column. Use it to test if a given substring appears

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /*
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -22,9 +22,9 @@ package org.apache.hadoop.hbase.filter;
 
 import org.apache.hadoop.hbase.KeyValue;
 
+import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-import java.io.DataInput;
 
 /**
  * A wrapper filter that returns true from {@link #filterAllRemaining()} as soon

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -19,13 +19,13 @@
  */
 package org.apache.hadoop.hbase.filter;
 
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.io.Writable;
+
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.io.Writable;
 
 /** Base class, combines Comparable<byte []> and Writable. */
 public abstract class WritableByteArrayComparable implements Writable, Comparable<byte[]> {
 

@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
@@ -17,7 +17,9 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/**Provides row-level filters applied to HRegion scan results during calls to
+
+/**
+ * Provides row-level filters applied to HRegion scan results during calls to
 * {@link org.apache.hadoop.hbase.client.ResultScanner#next()}.
 
 <p>

@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -18,41 +20,38 @@
 
 package org.apache.hadoop.hbase.ipc;
 
-import java.net.Socket;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.net.ConnectException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import java.io.IOException;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import javax.net.SocketFactory;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+
+import java.net.ConnectException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import javax.net.SocketFactory;
+
+import org.apache.commons.logging.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.ObjectWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /** A client for an IPC service. IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value. A service runs on
  * a port and is defined by a parameter class and a value class.
@@ -64,24 +63,24 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 public class HBaseClient {
 
-  public static final Log LOG =
+  private static final Log LOG =
     LogFactory.getLog("org.apache.hadoop.ipc.HBaseClient");
-  protected Hashtable<ConnectionId, Connection> connections =
+  protected final Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
-  protected Class<? extends Writable> valueClass;   // class of call values
+  protected final Class<? extends Writable> valueClass;   // class of call values
   protected int counter;                            // counter for call ids
-  protected AtomicBoolean running = new AtomicBoolean(true); // if client runs
+  protected final AtomicBoolean running = new AtomicBoolean(true); // if client runs
   final protected Configuration conf;
-  final protected int maxIdleTime; //connections will be culled if it was idle for
-                                   //maxIdleTime msecs
+  final protected int maxIdleTime; // connections will be culled if it was idle for
+                                   // maxIdleTime microsecs
   final protected int maxRetries; //the max. no. of retries for socket connections
   final protected long failureSleep; // Time to sleep before retry on failure.
-  protected boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-  protected boolean tcpKeepAlive; // if T then use keepalives
-  protected int pingInterval; // how often sends ping to the server in msecs
+  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+  protected final boolean tcpKeepAlive; // if T then use keepalives
+  protected final int pingInterval; // how often sends ping to the server in msecs
 
-  protected SocketFactory socketFactory;           // how to create sockets
+  protected final SocketFactory socketFactory;           // how to create sockets
   private int refCount = 1;
 
   final private static String PING_INTERVAL_NAME = "ipc.ping.interval";
@@ -94,7 +93,8 @@ public class HBaseClient {
    * @param conf Configuration
    * @param pingInterval the ping interval
    */
-  final public static void setPingInterval(Configuration conf, int pingInterval) {
+  @SuppressWarnings({"UnusedDeclaration"})
+  public static void setPingInterval(Configuration conf, int pingInterval) {
     conf.setInt(PING_INTERVAL_NAME, pingInterval);
   }
 
@@ -105,7 +105,7 @@ public class HBaseClient {
   * @param conf Configuration
   * @return the ping interval
   */
-  final static int getPingInterval(Configuration conf) {
+  static int getPingInterval(Configuration conf) {
    return conf.getInt(PING_INTERVAL_NAME, DEFAULT_PING_INTERVAL);
  }
 
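The two accessors above gate the client's keepalive pings. A sketch of turning that knob, assuming only that a Hadoop Configuration is at hand (the 30s value is illustrative; the property name and 60s default come from the fields above):

Configuration conf = new Configuration();
HBaseClient.setPingInterval(conf, 30000); // ping an unresponsive server every 30s
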
@@ -136,8 +136,8 @@ public class HBaseClient {
 
   /** A call waiting for a value. */
   private class Call {
-    int id;                                       // call id
-    Writable param;                               // parameter
+    final int id;                                 // call id
+    final Writable param;                         // parameter
     Writable value;                               // value, null if error
     IOException error;                            // exception, null if value
     boolean done;                                 // true when call is done
@@ -187,9 +187,9 @@ public class HBaseClient {
     private DataOutputStream out;
 
     // currently active calls
-    private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
-    private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
-    protected AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
+    private final Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
+    private final AtomicLong lastActivity = new AtomicLong();// last I/O activity time
+    protected final AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
     private IOException closeException; // close reason
 
     public Connection(InetSocketAddress address) throws IOException {
@@ -287,6 +287,7 @@ public class HBaseClient {
     /** Connect to the server and set up the I/O streams. It then sends
      * a header to the server and starts
      * the connection thread that waits for responses.
+     * @throws java.io.IOException e
      */
     protected synchronized void setupIOstreams() throws IOException {
       if (socket != null || shouldCloseConnection.get()) {
@@ -395,6 +396,7 @@ public class HBaseClient {
      *
      * Return true if it is time to read a response; false otherwise.
      */
+    @SuppressWarnings({"ThrowableInstanceNeverThrown"})
     private synchronized boolean waitForWork() {
       if (calls.isEmpty() && !shouldCloseConnection.get() && running.get()) {
         long timeout = maxIdleTime-
@@ -402,7 +404,7 @@ public class HBaseClient {
         if (timeout>0) {
           try {
             wait(timeout);
-          } catch (InterruptedException e) {}
+          } catch (InterruptedException ignored) {}
         }
       }
 
@@ -431,7 +433,8 @@ public class HBaseClient {
       long curTime = System.currentTimeMillis();
       if ( curTime - lastActivity.get() >= pingInterval) {
         lastActivity.set(curTime);
-        synchronized (out) {
+        //noinspection SynchronizeOnNonFinalField
+        synchronized (this.out) {
           out.writeInt(PING_CALL_ID);
           out.flush();
         }
@@ -455,18 +458,18 @@ public class HBaseClient {
                 + connections.size());
     }
 
-    /** Initiates a call by sending the parameter to the remote server.
+    /* Initiates a call by sending the parameter to the remote server.
      * Note: this is not called from the Connection thread, but by other
      * threads.
-     * @param call
      */
-    public void sendParam(Call call) {
+    protected void sendParam(Call call) {
       if (shouldCloseConnection.get()) {
         return;
       }
 
       DataOutputBuffer d=null;
       try {
+        //noinspection SynchronizeOnNonFinalField
         synchronized (this.out) { // FindBugs IS2_INCONSISTENT_SYNC
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + " sending #" + call.id);
@@ -510,6 +513,7 @@ public class HBaseClient {
 
         boolean isError = in.readBoolean();     // read if error
         if (isError) {
+          //noinspection ThrowableInstanceNeverThrown
           call.setException(new RemoteException( WritableUtils.readString(in),
               WritableUtils.readString(in)));
         } else {
@@ -585,8 +589,8 @@ public class HBaseClient {
 
   /** Call implementation used for parallel calls. */
   private class ParallelCall extends Call {
-    private ParallelResults results;
-    protected int index;
+    private final ParallelResults results;
+    protected final int index;
 
     public ParallelCall(Writable param, ParallelResults results, int index) {
       super(param);
@@ -603,7 +607,7 @@ public class HBaseClient {
 
   /** Result collector for parallel calls. */
   private static class ParallelResults {
-    protected Writable[] values;
+    protected final Writable[] values;
     protected int size;
     protected int count;
 
@@ -612,11 +616,10 @@ public class HBaseClient {
       this.size = size;
     }
 
-    /**
+    /*
      * Collect a result.
-     * @param call
      */
-    public synchronized void callComplete(ParallelCall call) {
+    synchronized void callComplete(ParallelCall call) {
+      // FindBugs IS2_INCONSISTENT_SYNC
       values[call.index] = call.value;            // store the value
       count++;                                    // count it
@@ -628,9 +631,9 @@ public class HBaseClient {
   /**
    * Construct an IPC client whose values are of the given {@link Writable}
    * class.
-   * @param valueClass
-   * @param conf
-   * @param factory
+   * @param valueClass value class
+   * @param conf configuration
+   * @param factory socket factory
    */
   public HBaseClient(Class<? extends Writable> valueClass, Configuration conf,
       SocketFactory factory) {
@@ -651,8 +654,8 @@ public class HBaseClient {
 
   /**
    * Construct an IPC client with the default SocketFactory
-   * @param valueClass
-   * @param conf
+   * @param valueClass value class
+   * @param conf configuration
   */
  public HBaseClient(Class<? extends Writable> valueClass, Configuration conf) {
    this(valueClass, conf, NetUtils.getDefaultSocketFactory(conf));
@@ -688,7 +691,7 @@ public class HBaseClient {
       while (!connections.isEmpty()) {
         try {
           Thread.sleep(100);
-        } catch (InterruptedException e) {
+        } catch (InterruptedException ignored) {
         }
       }
     }
@@ -696,10 +699,10 @@ public class HBaseClient {
   /** Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code>, returning the value. Throws exceptions if there are
    * network problems or if the remote code threw an exception.
-   * @param param
-   * @param address
+   * @param param writable parameter
+   * @param address network address
    * @return Writable
-   * @throws IOException
+   * @throws IOException e
    */
   public Writable call(Writable param, InetSocketAddress address)
   throws IOException {
@@ -712,6 +715,7 @@ public class HBaseClient {
     Call call = new Call(param);
     Connection connection = getConnection(addr, ticket, call);
     connection.sendParam(call);                 // send the parameter
+    //noinspection SynchronizationOnLocalVariableOrMethodParameter
     synchronized (call) {
       while (!call.done) {
         try {
@@ -743,6 +747,7 @@ public class HBaseClient {
    * @param exception the relevant exception
    * @return an exception to throw
    */
+  @SuppressWarnings({"ThrowableInstanceNeverThrown"})
   private IOException wrapException(InetSocketAddress addr,
                                     IOException exception) {
     if (exception instanceof ConnectException) {
@@ -766,16 +771,18 @@ public class HBaseClient {
    * corresponding address. When all values are available, or have timed out
    * or errored, the collected results are returned in an array. The array
    * contains nulls for calls that timed out or errored.
-   * @param params
-   * @param addresses
+   * @param params writable parameters
+   * @param addresses socket addresses
    * @return Writable[]
-   * @throws IOException
+   * @throws IOException e
   */
  public Writable[] call(Writable[] params, InetSocketAddress[] addresses)
    throws IOException {
    if (addresses.length == 0) return new Writable[0];
 
    ParallelResults results = new ParallelResults(params.length);
+    // TODO this synchronization block doesnt make any sense, we should possibly fix it
+    //noinspection SynchronizationOnLocalVariableOrMethodParameter
    synchronized (results) {
      for (int i = 0; i < params.length; i++) {
        ParallelCall call = new ParallelCall(params[i], results, i);
@@ -792,14 +799,14 @@ public class HBaseClient {
       while (results.count != results.size) {
         try {
           results.wait();                    // wait for all results
-        } catch (InterruptedException e) {}
+        } catch (InterruptedException ignored) {}
       }
 
       return results.values;
     }
   }
 
-  /** Get a connection from the pool, or create a new one and add it to the
+  /* Get a connection from the pool, or create a new one and add it to the
    * pool. Connections to a given host/port are reused. */
   private Connection getConnection(InetSocketAddress addr,
                                    UserGroupInformation ticket,
@@ -838,8 +845,8 @@ public class HBaseClient {
    * to servers are uniquely identified by <remoteAddress, ticket>
    */
   private static class ConnectionId {
-    InetSocketAddress address;
-    UserGroupInformation ticket;
+    final InetSocketAddress address;
+    final UserGroupInformation ticket;
 
     ConnectionId(InetSocketAddress address, UserGroupInformation ticket) {
       this.address = address;

@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +20,18 @@
 
 package org.apache.hadoop.hbase.ipc;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.io.HbaseObjectWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import javax.net.SocketFactory;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
@@ -29,26 +43,9 @@ import java.lang.reflect.Proxy;
 import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
-
-import javax.net.SocketFactory;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-import org.apache.hadoop.hbase.io.HbaseObjectWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.UserGroupInformation;
 
 /** A simple RPC mechanism.
  *
 * This is a local hbase copy of the hadoop RPC so we can do things like
@@ -99,8 +96,8 @@ public class HBaseRPC {
     }
 
     /**
-     * @param method
-     * @param parameters
+     * @param method method to call
+     * @param parameters parameters of call
      */
     public Invocation(Method method, Object[] parameters) {
       this.methodName = method.getName();
@@ -174,6 +171,7 @@ public class HBaseRPC {
      * if no cached client exists.
      *
      * @param conf Configuration
+     * @param factory socket factory
      * @return an IPC client
      */
     protected synchronized HBaseClient getClient(Configuration conf,
@@ -208,6 +206,7 @@ public class HBaseRPC {
     /**
      * Stop a RPC client connection
      * A RPC client is closed only when its reference count becomes zero.
+     * @param client client to stop
      */
     protected void stopClient(HBaseClient client) {
       synchronized (this) {
@@ -231,10 +230,10 @@ public class HBaseRPC {
     private boolean isClosed = false;
 
     /**
-     * @param address
-     * @param ticket
-     * @param conf
-     * @param factory
+     * @param address address for invoker
+     * @param ticket ticket
+     * @param conf configuration
+     * @param factory socket factory
      */
     public Invoker(InetSocketAddress address, UserGroupInformation ticket,
                    Configuration conf, SocketFactory factory) {
@@ -317,14 +316,14 @@ public class HBaseRPC {
   }
 
   /**
-   * @param protocol
-   * @param clientVersion
-   * @param addr
-   * @param conf
-   * @param maxAttempts
-   * @param timeout
+   * @param protocol protocol interface
+   * @param clientVersion which client version we expect
+   * @param addr address of remote service
+   * @param conf configuration
+   * @param maxAttempts max attempts
+   * @param timeout timeout in milliseconds
    * @return proxy
-   * @throws IOException
+   * @throws IOException e
   */
  @SuppressWarnings("unchecked")
  public static VersionedProtocol waitForProxy(Class protocol,
@@ -371,13 +370,13 @@ public class HBaseRPC {
   * Construct a client-side proxy object that implements the named protocol,
   * talking to a server at the named address.
   *
-   * @param protocol
-   * @param clientVersion
-   * @param addr
-   * @param conf
-   * @param factory
+   * @param protocol interface
+   * @param clientVersion version we are expecting
+   * @param addr remote address
+   * @param conf configuration
+   * @param factory socket factory
   * @return proxy
-   * @throws IOException
+   * @throws IOException e
   */
  public static VersionedProtocol getProxy(Class<?> protocol,
      long clientVersion, InetSocketAddress addr, Configuration conf,
@@ -389,14 +388,14 @@ public class HBaseRPC {
   * Construct a client-side proxy object that implements the named protocol,
   * talking to a server at the named address.
   *
-   * @param protocol
-   * @param clientVersion
-   * @param addr
-   * @param ticket
-   * @param conf
-   * @param factory
+   * @param protocol interface
+   * @param clientVersion version we are expecting
+   * @param addr remote address
+   * @param ticket ticket
+   * @param conf configuration
+   * @param factory socket factory
   * @return proxy
-   * @throws IOException
+   * @throws IOException e
   */
  public static VersionedProtocol getProxy(Class<?> protocol,
      long clientVersion, InetSocketAddress addr, UserGroupInformation ticket,
@@ -418,12 +417,12 @@ public class HBaseRPC {
   /**
   * Construct a client-side proxy object with the default SocketFactory
   *
-   * @param protocol
-   * @param clientVersion
-   * @param addr
-   * @param conf
+   * @param protocol interface
+   * @param clientVersion version we are expecting
+   * @param addr remote address
+   * @param conf configuration
   * @return a proxy instance
-   * @throws IOException
+   * @throws IOException e
   */
  public static VersionedProtocol getProxy(Class<?> protocol,
      long clientVersion, InetSocketAddress addr, Configuration conf)
@@ -446,12 +445,12 @@ public class HBaseRPC {
   /**
   * Expert: Make multiple, parallel calls to a set of servers.
   *
-   * @param method
-   * @param params
-   * @param addrs
-   * @param conf
+   * @param method method to invoke
+   * @param params array of parameters
+   * @param addrs array of addresses
+   * @param conf configuration
   * @return values
-   * @throws IOException
+   * @throws IOException e
   */
  public static Object[] call(Method method, Object[][] params,
      InetSocketAddress[] addrs, Configuration conf)
@@ -484,12 +483,12 @@ public class HBaseRPC {
   * Construct a server for a protocol implementation instance listening on a
   * port and address.
   *
-   * @param instance
-   * @param bindAddress
-   * @param port
-   * @param conf
+   * @param instance instance
+   * @param bindAddress bind address
+   * @param port port to bind to
+   * @param conf configuration
   * @return Server
-   * @throws IOException
+   * @throws IOException e
   */
  public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf)
    throws IOException {
@@ -500,14 +499,14 @@ public class HBaseRPC {
   * Construct a server for a protocol implementation instance listening on a
   * port and address.
   *
-   * @param instance
-   * @param bindAddress
-   * @param port
-   * @param numHandlers
-   * @param verbose
-   * @param conf
+   * @param instance instance
+   * @param bindAddress bind address
+   * @param port port to bind to
+   * @param numHandlers number of handlers to start
+   * @param verbose verbose flag
+   * @param conf configuration
   * @return Server
-   * @throws IOException
+   * @throws IOException e
   */
  public static Server getServer(final Object instance, final String bindAddress, final int port,
                                 final int numHandlers,
@@ -528,7 +527,7 @@ public class HBaseRPC {
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
-     * @throws IOException
+     * @throws IOException e
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port)
      throws IOException {
@@ -550,7 +549,7 @@ public class HBaseRPC {
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
-     * @throws IOException
+     * @throws IOException e
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port,
                  int numHandlers, boolean verbose) throws IOException {

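For orientation, a sketch of standing up an RPC endpoint with the getServer overload documented above; the instance, bind address, and port are illustrative, and conf is assumed to be a Hadoop Configuration already in scope:

HBaseRPC.Server server = HBaseRPC.getServer(
    instance, "0.0.0.0", 60020, 10, false, conf); // 10 handlers, not verbose
server.start(); // begin accepting calls
server.join();  // block until the server is stopped
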
@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file

@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -15,14 +17,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hbase.ipc;
-
-import javax.management.ObjectName;
+package org.apache.hadoop.hbase.ipc;
+
 import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
 import org.apache.hadoop.metrics.util.MetricsRegistry;
+
+import javax.management.ObjectName;
 
 /**
  * Exports HBase RPC statistics recorded in {@link HBaseRpcMetrics} as an MBean
  * for JMX monitoring.
@@ -30,6 +33,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry;
 public class HBaseRPCStatistics extends MetricsDynamicMBeanBase {
   private final ObjectName mbeanName;
 
+  @SuppressWarnings({"UnusedDeclaration"})
   public HBaseRPCStatistics(MetricsRegistry registry,
       String hostName, String port) {
     super(registry, "HBaseRPCStatistics");

@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -15,6 +17,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.hadoop.hbase.ipc;
 
 import org.apache.commons.logging.Log;
@@ -23,8 +26,8 @@ import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
 import org.apache.hadoop.metrics.MetricsUtil;
 import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 import org.apache.hadoop.metrics.util.MetricsRegistry;
+import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
 
 /**
  *
@@ -63,7 +66,7 @@ public class HBaseRpcMetrics implements Updater {
    * - they can be set directly by calling their set/inc methods
    * -they can also be read directly - e.g. JMX does this.
    */
-  public MetricsRegistry registry = new MetricsRegistry();
+  public final MetricsRegistry registry = new MetricsRegistry();
 
   public MetricsTimeVaryingRate rpcQueueTime = new MetricsTimeVaryingRate("RpcQueueTime", registry);
   public MetricsTimeVaryingRate rpcProcessingTime = new MetricsTimeVaryingRate("RpcProcessingTime", registry);
@@ -75,8 +78,7 @@ public class HBaseRpcMetrics implements Updater {
     return (MetricsTimeVaryingRate) registry.get(key);
   }
   private MetricsTimeVaryingRate create(String key) {
-    MetricsTimeVaryingRate newMetric = new MetricsTimeVaryingRate(key, this.registry);
-    return newMetric;
+    return new MetricsTimeVaryingRate(key, this.registry);
   }
 
   public synchronized void inc(String name, int amt) {
@@ -89,14 +91,14 @@ public class HBaseRpcMetrics implements Updater {
 
   /**
    * Push the metrics to the monitoring subsystem on doUpdate() call.
-   * @param context
+   * @param context ctx
    */
   public void doUpdates(MetricsContext context) {
     rpcQueueTime.pushMetric(metricsRecord);
     rpcProcessingTime.pushMetric(metricsRecord);
 
     synchronized (registry) {
-      // Iterate through the registry to propogate the different rpc metrics.
+      // Iterate through the registry to propagate the different rpc metrics.
 
       for (String metricName : registry.getKeyList() ) {
         MetricsTimeVaryingRate value = (MetricsTimeVaryingRate) registry.get(metricName);

@@ -1,4 +1,6 @@
 /**
+ * Copyright 2010 The Apache Software Foundation
+ *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements. See the NOTICE file
  * distributed with this work for additional information
@@ -18,6 +20,16 @@
 
 package org.apache.hadoop.hbase.ipc;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.ObjectWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -33,11 +45,11 @@ import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.nio.channels.CancelledKeyException;
 import java.nio.channels.ClosedChannelException;
+import java.nio.channels.ReadableByteChannel;
 import java.nio.channels.SelectionKey;
 import java.nio.channels.Selector;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
-import java.nio.channels.ReadableByteChannel;
 import java.nio.channels.WritableByteChannel;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -48,16 +60,6 @@ import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.ObjectWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.StringUtils;
 
 /** An abstract IPC service. IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value. A service runs on
 * a port and is defined by a parameter class and a value class.
@@ -143,6 +145,7 @@ public abstract class HBaseServer {
 
   protected Configuration conf;
 
+  @SuppressWarnings({"FieldCanBeLocal"})
   private int maxQueueSize;
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -151,7 +154,7 @@ public abstract class HBaseServer {
   volatile protected boolean running = true;         // true while server runs
   protected BlockingQueue<Call> callQueue; // queued calls
 
-  protected List<Connection> connectionList =
+  protected final List<Connection> connectionList =
     Collections.synchronizedList(new LinkedList<Connection>());
     //maintain a list
     //of client connections
@@ -254,6 +257,7 @@ public abstract class HBaseServer {
      * that will be cleanedup per run. The criteria for cleanup is the time
      * for which the connection was idle. If 'force' is true then all
      * connections will be looked at for the cleanup.
+     * @param force all connections will be looked at for cleanup
      */
     private void cleanupConnections(boolean force) {
       if (force || numConnections > thresholdIdleConnections) {
@@ -288,6 +292,7 @@ public abstract class HBaseServer {
             closeConnection(c);
             numNuked++;
             end--;
+            //noinspection UnusedAssignment
             c = null;
             if (!force && numNuked == maxConnectionsToNuke) break;
           }
@@ -317,7 +322,7 @@ public abstract class HBaseServer {
             else if (key.isReadable())
               doRead(key);
           }
-        } catch (IOException e) {
+        } catch (IOException ignored) {
         }
         key = null;
       }
@@ -336,7 +341,7 @@ public abstract class HBaseServer {
           LOG.warn("Out of Memory in server select", e);
           closeCurrentConnection(key);
           cleanupConnections(true);
-          try { Thread.sleep(60000); } catch (Exception ie) {}
+          try { Thread.sleep(60000); } catch (Exception ignored) {}
         }
       } catch (InterruptedException e) {
         if (running) {                          // unexpected -- log it
@@ -354,7 +359,7 @@ public abstract class HBaseServer {
       try {
         acceptChannel.close();
         selector.close();
-      } catch (IOException e) { }
+      } catch (IOException ignored) { }
 
       selector= null;
       acceptChannel= null;
@@ -373,7 +378,6 @@ public abstract class HBaseServer {
           if (LOG.isDebugEnabled())
             LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
           closeConnection(c);
-          c = null;
         }
       }
     }
@@ -383,7 +387,7 @@ public abstract class HBaseServer {
     }
 
     void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
-      Connection c = null;
+      Connection c;
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
       // accept up to 10 connections
       for (int i=0; i<10; i++) {
@@ -429,7 +433,7 @@ public abstract class HBaseServer {
                      c.getHostAddress() + ". Number of active connections: "+
                      numConnections);
           closeConnection(c);
-          c = null;
+          // c = null;
         }
         else {
           c.setLastContact(System.currentTimeMillis());
@@ -528,7 +532,7 @@ public abstract class HBaseServer {
             // some thread(s) a chance to finish
             //
             LOG.warn("Out of Memory in server select", e);
-            try { Thread.sleep(60000); } catch (Exception ie) {}
+            try { Thread.sleep(60000); } catch (Exception ignored) {}
           }
         } catch (Exception e) {
           LOG.warn("Exception in Responder " +
@@ -568,9 +572,8 @@ public abstract class HBaseServer {
     // for a long time.
     //
     private void doPurge(Call call, long now) {
-      LinkedList<Call> responseQueue = call.connection.responseQueue;
-      synchronized (responseQueue) {
-        Iterator<Call> iter = responseQueue.listIterator(0);
+      synchronized (call.connection.responseQueue) {
+        Iterator<Call> iter = call.connection.responseQueue.listIterator(0);
         while (iter.hasNext()) {
           Call nextCall = iter.next();
           if (now > nextCall.timestamp + PURGE_INTERVAL) {
@@ -584,13 +587,15 @@ public abstract class HBaseServer {
     // Processes one response. Returns true if there are no more pending
     // data for this channel.
     //
-    private boolean processResponse(LinkedList<Call> responseQueue,
+    @SuppressWarnings({"ConstantConditions"})
+    private boolean processResponse(final LinkedList<Call> responseQueue,
                                     boolean inHandler) throws IOException {
       boolean error = true;
       boolean done = false;       // there is more data for this channel.
-      int numElements = 0;
+      int numElements;
       Call call = null;
       try {
+        //noinspection SynchronizationOnLocalVariableOrMethodParameter
         synchronized (responseQueue) {
           //
           // If there are no items for this channel, then we are done
@@ -618,6 +623,7 @@ public abstract class HBaseServer {
           }
           if (!call.response.hasRemaining()) {
             call.connection.decRpcCount();
+            //noinspection RedundantIfStatement
             if (numElements == 1) {    // last call fully processes.
               done = true;             // no more data for this channel.
             } else {
@@ -706,7 +712,7 @@ public abstract class HBaseServer {
     protected SocketChannel channel;
     private ByteBuffer data;
     private ByteBuffer dataLengthBuffer;
-    protected LinkedList<Call> responseQueue;
+    protected final LinkedList<Call> responseQueue;
     private volatile int rpcCount = 0; // number of outstanding rpcs
     private long lastContact;
     private int dataLength;
@@ -774,9 +780,7 @@ public abstract class HBaseServer {
     }
 
     protected boolean timedOut(long currentTime) {
-      if (isIdle() && currentTime - lastContact > maxIdleTime)
-        return true;
-      return false;
+      return isIdle() && currentTime - lastContact > maxIdleTime;
     }
 
     public int readAndProcess() throws IOException, InterruptedException {
@@ -784,7 +788,7 @@ public abstract class HBaseServer {
       /* Read at most one RPC. If the header is not read completely yet
        * then iterate until we read first RPC or until there is no data left.
        */
-      int count = -1;
+      int count;
       if (dataLengthBuffer.remaining() > 0) {
         count = channelRead(channel, dataLengthBuffer);
         if (count < 0 || dataLengthBuffer.remaining() > 0)
@@ -875,11 +879,11 @@ public abstract class HBaseServer {
       dataLengthBuffer = null;
       if (!channel.isOpen())
         return;
-      try {socket.shutdownOutput();} catch(Exception e) {} // FindBugs DE_MIGHT_IGNORE
+      try {socket.shutdownOutput();} catch(Exception ignored) {} // FindBugs DE_MIGHT_IGNORE
       if (channel.isOpen()) {
-        try {channel.close();} catch(Exception e) {}
+        try {channel.close();} catch(Exception ignored) {}
       }
-      try {socket.close();} catch(Exception e) {}
+      try {socket.close();} catch(Exception ignored) {}
     }
   }
 
@@ -973,7 +977,7 @@ public abstract class HBaseServer {
   {
     this(bindAddress, port, paramClass, handlerCount, conf, Integer.toString(port));
   }
-  /** Constructs a server listening on the named port and address. Parameters passed must
+  /* Constructs a server listening on the named port and address. Parameters passed must
    * be of the named class. The <code>handlerCount</handlerCount> determines
    * the number of handler threads that will be used to process calls.
   *
@@ -1015,7 +1019,7 @@ public abstract class HBaseServer {
   }
 
   /** Sets the socket buffer size used for responding to RPCs.
-   * @param size
+   * @param size send size
   */
  public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
 
@@ -1054,7 +1058,7 @@ public abstract class HBaseServer {
   /** Wait for the server to be stopped.
   * Does not wait for all subthreads to finish.
   * See {@link #stop()}.
-   * @throws InterruptedException
+   * @throws InterruptedException e
   */
  public synchronized void join() throws InterruptedException {
    while (running) {
@@ -1071,10 +1075,10 @@ public abstract class HBaseServer {
   }
 
   /** Called for each call.
-   * @param param
-   * @param receiveTime
+   * @param param writable parameter
+   * @param receiveTime time
   * @return Writable
-   * @throws IOException
+   * @throws IOException e
   */
  public abstract Writable call(Writable param, long receiveTime)
    throws IOException;
@@ -1118,6 +1122,10 @@ public abstract class HBaseServer {
   * as a result of multiple write operations required to write a large
   * buffer.
   *
+   * @param channel writable byte channel to write to
+   * @param buffer buffer to write
+   * @return number of bytes written
+   * @throws java.io.IOException e
   * @see WritableByteChannel#write(ByteBuffer)
   */
  protected static int channelWrite(WritableByteChannel channel,
|
||||
|
@ -1132,6 +1140,10 @@ public abstract class HBaseServer {
|
|||
* This is to avoid jdk from creating many direct buffers as the size of
|
||||
* ByteBuffer increases. There should not be any performance degredation.
|
||||
*
|
||||
* @param channel writable byte channel to write on
|
||||
* @param buffer buffer to write
|
||||
* @return number of bytes written
|
||||
* @throws java.io.IOException e
|
||||
* @see ReadableByteChannel#read(ByteBuffer)
|
||||
*/
|
||||
protected static int channelRead(ReadableByteChannel channel,
|
||||
|
@ -1145,6 +1157,11 @@ public abstract class HBaseServer {
|
|||
* and {@link #channelWrite(WritableByteChannel, ByteBuffer)}. Only
|
||||
* one of readCh or writeCh should be non-null.
|
||||
*
|
||||
* @param readCh read channel
|
||||
* @param writeCh write channel
|
||||
* @param buf buffer to read or write into/out of
|
||||
* @return bytes written
|
||||
* @throws java.io.IOException e
|
||||
* @see #channelRead(ReadableByteChannel, ByteBuffer)
|
||||
* @see #channelWrite(WritableByteChannel, ByteBuffer)
|
||||
*/
|
||||
|
|
|
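The javadoc added to channelWrite/channelRead above documents helpers that move a ByteBuffer through a channel; the neighbouring comment about the jdk "creating many direct buffers" refers to pushing the buffer through in bounded slices. The following is a minimal sketch of that slicing idea only, not HBaseServer's actual implementation; the ChunkedChannelWriter name and the 8 KB limit are assumptions for illustration:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    final class ChunkedChannelWriter {
      // Assumed bound; the real class keeps its own internal limit for the
      // same purpose (capping the temporary direct buffer per write).
      private static final int NIO_BUFFER_LIMIT = 8 * 1024;

      // Writes the buffer in bounded slices and returns total bytes written,
      // so the JDK never materializes one huge temporary direct buffer.
      static int writeChunked(WritableByteChannel channel, ByteBuffer buffer)
          throws IOException {
        int written = 0;
        while (buffer.remaining() > 0) {
          int slice = Math.min(buffer.remaining(), NIO_BUFFER_LIMIT);
          int savedLimit = buffer.limit();
          buffer.limit(buffer.position() + slice); // expose one slice only
          int n = channel.write(buffer);           // may write less than slice
          buffer.limit(savedLimit);                // restore the full limit
          if (n <= 0) {
            break; // channel not ready for more; caller can retry later
          }
          written += n;
        }
        return written;
      }
    }
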
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,14 +19,14 @@
 */
 package org.apache.hadoop.hbase.ipc;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.io.Writable;
+
+import java.io.IOException;

 /**
 * Clients interact with the HMasterInterface to gain access to meta-level
 * HBase functionality, like finding an HRegionServer and creating/destroying

@@ -46,32 +46,32 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion {
 /**
 * Creates a new table
 * @param desc table descriptor
-* @throws IOException
+* @throws IOException e
 */
 public void createTable(HTableDescriptor desc) throws IOException;

 /**
 * Deletes a table
-* @param tableName
-* @throws IOException
+* @param tableName table to delete
+* @throws IOException e
 */
 public void deleteTable(final byte [] tableName) throws IOException;

 /**
 * Adds a column to the specified table
-* @param tableName
+* @param tableName table to modify
 * @param column column descriptor
-* @throws IOException
+* @throws IOException e
 */
 public void addColumn(final byte [] tableName, HColumnDescriptor column)
 throws IOException;

 /**
 * Modifies an existing column on the specified table
-* @param tableName
+* @param tableName table name
 * @param columnName name of the column to edit
 * @param descriptor new column descriptor
-* @throws IOException
+* @throws IOException e
 */
 public void modifyColumn(final byte [] tableName, final byte [] columnName,
 HColumnDescriptor descriptor)

@@ -79,48 +79,49 @@ public interface HMasterInterface extends HBaseRPCProtocolVersion {


 /**
-* Deletes a column from the specified table
-* @param tableName
-* @param columnName
-* @throws IOException
+* Deletes a column from the specified table. Table must be disabled.
+* @param tableName table to alter
+* @param columnName column family to remove
+* @throws IOException e
 */
 public void deleteColumn(final byte [] tableName, final byte [] columnName)
 throws IOException;

 /**
 * Puts the table on-line (only needed if table has been previously taken offline)
-* @param tableName
-* @throws IOException
+* @param tableName table to enable
+* @throws IOException e
 */
 public void enableTable(final byte [] tableName) throws IOException;

 /**
 * Take table offline
 *
-* @param tableName
-* @throws IOException
+* @param tableName table to take offline
+* @throws IOException e
 */
 public void disableTable(final byte [] tableName) throws IOException;

 /**
 * Modify a table's metadata
 *
-* @param tableName
-* @param op
-* @param args
-* @throws IOException
+* @param tableName table to modify
+* @param op the operation to do
+* @param args arguments for operation
+* @throws IOException e
 */
 public void modifyTable(byte[] tableName, HConstants.Modify op, Writable[] args)
 throws IOException;

 /**
 * Shutdown an HBase cluster.
-* @throws IOException
+* @throws IOException e
 */
 public void shutdown() throws IOException;

 /**
 * Return cluster status.
 * @return status object
 */
 public ClusterStatus getClusterStatus();
 }

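Clients normally reach the HMasterInterface RPCs above through HBaseAdmin rather than directly. A rough usage sketch of the table lifecycle those methods describe, assuming the era's HBaseAdmin signatures (the table and family names here are made up, and the exact client API is not confirmed by this diff):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class TableLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());

        HTableDescriptor desc = new HTableDescriptor("example");
        desc.addFamily(new HColumnDescriptor("info"));
        admin.createTable(desc);               // HMasterInterface.createTable

        admin.disableTable("example");         // take the table offline
        admin.deleteColumn("example", "info"); // allowed only while disabled
        admin.deleteTable("example");          // HMasterInterface.deleteTable
      }
    }
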
@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,12 +19,12 @@
 */
 package org.apache.hadoop.hbase.ipc;

-import java.io.IOException;
-
-import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.HMsg;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
+import org.apache.hadoop.io.MapWritable;
+
+import java.io.IOException;

 /**
 * HRegionServers interact with the HMasterRegionInterface to report on local

@@ -39,8 +39,8 @@ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion {

 /**
 * Called when a region server first starts
-* @param info
-* @throws IOException
+* @param info server info
+* @throws IOException e
 * @return Configuration for the regionserver to use: e.g. filesystem,
 * hbase rootdir, etc.
 */

@@ -56,7 +56,7 @@ public interface HMasterRegionInterface extends HBaseRPCProtocolVersion {
 * reporting server's most loaded regions. These are candidates for being
 * rebalanced.
 * @return instructions from the master to the region server
-* @throws IOException
+* @throws IOException e
 */
 public HMsg[] regionServerReport(HServerInfo info, HMsg msgs[],
 HRegionInfo mostLoadedRegions[])

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,26 +19,25 @@
 */
 package org.apache.hadoop.hbase.ipc;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.MultiPut;
+import org.apache.hadoop.hbase.client.MultiPutResponse;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.MultiPutResponse;
-import org.apache.hadoop.hbase.client.MultiPut;
 import org.apache.hadoop.hbase.regionserver.HRegion;

+import java.io.IOException;
+
 /**
 * Clients interact with HRegionServers using a handle to the HRegionInterface.
 *
 * <p>NOTE: if you change the interface, you must change the RPC version
 * number in HBaseRPCProtocolVersion
-*
 */
 public interface HRegionInterface extends HBaseRPCProtocolVersion {
 /**

@@ -46,7 +45,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 *
 * @param regionName name of the region
 * @return HRegionInfo object for region
-* @throws NotServingRegionException
+* @throws NotServingRegionException e
 */
 public HRegionInfo getRegionInfo(final byte [] regionName)
 throws NotServingRegionException;

@@ -60,7 +59,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @param row row key
 * @param family Column family to look for row in.
 * @return map of values
-* @throws IOException
+* @throws IOException e
 */
 public Result getClosestRowBefore(final byte [] regionName,
 final byte [] row, final byte [] family)

@@ -77,7 +76,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @param regionName name of region to get from
 * @param get Get operation
 * @return Result
-* @throws IOException
+* @throws IOException e
 */
 public Result get(byte [] regionName, Get get) throws IOException;


@@ -86,15 +85,15 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @param regionName name of region to get from
 * @param get Get operation describing cell to test
 * @return true if exists
-* @throws IOException
+* @throws IOException e
 */
 public boolean exists(byte [] regionName, Get get) throws IOException;

 /**
 * Put data into the specified region
-* @param regionName
+* @param regionName region name
 * @param put the data to be put
-* @throws IOException
+* @throws IOException e
 */
 public void put(final byte [] regionName, final Put put)
 throws IOException;

@@ -102,11 +101,11 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 /**
 * Put an array of puts into the specified region
 *
-* @param regionName
-* @param puts
+* @param regionName region name
+* @param puts array of puts to execute
 * @return The number of processed put's. Returns -1 if all Puts
 * processed successfully.
-* @throws IOException
+* @throws IOException e
 */
 public int put(final byte[] regionName, final Put [] puts)
 throws IOException;

@@ -115,9 +114,9 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * Deletes all the KeyValues that match those found in the Delete object,
 * if their ts <= to the Delete. In case of a delete with a specific ts it
 * only deletes that specific KeyValue.
-* @param regionName
-* @param delete
-* @throws IOException
+* @param regionName region name
+* @param delete delete object
+* @throws IOException e
 */
 public void delete(final byte[] regionName, final Delete delete)
 throws IOException;

@@ -125,26 +124,27 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 /**
 * Put an array of deletes into the specified region
 *
-* @param regionName
-* @param deletes
+* @param regionName region name
+* @param deletes delete array to execute
 * @return The number of processed deletes. Returns -1 if all Deletes
 * processed successfully.
-* @throws IOException
+* @throws IOException e
 */
 public int delete(final byte[] regionName, final Delete [] deletes)
 throws IOException;

 /**
 * Atomically checks if a row/family/qualifier value match the expectedValue.
-* If it does, it adds the put.
+* If it does, it adds the put. If passed expected value is null, then the
+* check is for non-existance of the row/column.
 *
-* @param regionName
-* @param row
-* @param family
-* @param qualifier
+* @param regionName region name
+* @param row row to check
+* @param family column family
+* @param qualifier column qualifier
 * @param value the expected value
-* @param put
-* @throws IOException
+* @param put data to put if check succeeds
+* @throws IOException e
 * @return true if the new put was execute, false otherwise
 */
 public boolean checkAndPut(final byte[] regionName, final byte [] row,

@@ -154,16 +154,17 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {

 /**
 * Atomically increments a column value. If the column value isn't long-like,
-* this could throw an exception.
+* this could throw an exception. If passed expected value is null, then the
+* check is for non-existance of the row/column.
 *
-* @param regionName
-* @param row
-* @param family
-* @param qualifier
-* @param amount
+* @param regionName region name
+* @param row row to check
+* @param family column family
+* @param qualifier column qualifier
+* @param amount long amount to increment
 * @param writeToWAL whether to write the increment to the WAL
 * @return new incremented column value
-* @throws IOException
+* @throws IOException e
 */
 public long incrementColumnValue(byte [] regionName, byte [] row,
 byte [] family, byte [] qualifier, long amount, boolean writeToWAL)

@@ -180,7 +181,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @param regionName name of region to scan
 * @param scan configured scan object
 * @return scannerId scanner identifier used in other calls
-* @throws IOException
+* @throws IOException e
 */
 public long openScanner(final byte [] regionName, final Scan scan)
 throws IOException;

@@ -189,7 +190,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * Get the next set of values
 * @param scannerId clientId passed to openScanner
 * @return map of values; returns null if no results.
-* @throws IOException
+* @throws IOException e
 */
 public Result next(long scannerId) throws IOException;


@@ -200,7 +201,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @return Array of Results (map of values); array is empty if done with this
 * region and null if we are NOT to go to the next region (happens when a
 * filter rules that the scan is done).
-* @throws IOException
+* @throws IOException e
 */
 public Result [] next(long scannerId, int numberOfRows) throws IOException;


@@ -208,7 +209,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * Close a scanner
 *
 * @param scannerId the scanner id returned by openScanner
-* @throws IOException
+* @throws IOException e
 */
 public void close(long scannerId) throws IOException;


@@ -218,7 +219,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 * @param regionName name of region
 * @param row row to lock
 * @return lockId lock identifier
-* @throws IOException
+* @throws IOException e
 */
 public long lockRow(final byte [] regionName, final byte [] row)
 throws IOException;

@@ -226,9 +227,9 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 /**
 * Releases a remote row lock.
 *
-* @param regionName
+* @param regionName region name
 * @param lockId the lock id returned by lockRow
-* @throws IOException
+* @throws IOException e
 */
 public void unlockRow(final byte [] regionName, final long lockId)
 throws IOException;

@@ -237,14 +238,14 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 /**
 * Method used when a master is taking the place of another failed one.
 * @return All regions assigned on this region server
-* @throws IOException
+* @throws IOException e
 */
 public HRegionInfo[] getRegionsAssignment() throws IOException;

 /**
 * Method used when a master is taking the place of another failed one.
 * @return The HSI
-* @throws IOException
+* @throws IOException e
 */
 public HServerInfo getHServerInfo() throws IOException;


@@ -254,7 +255,7 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion {
 *
 * @param puts the request
 * @return the reply
-* @throws IOException
+* @throws IOException e
 */
 public MultiPutResponse multiPut(MultiPut puts) throws IOException;


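The checkAndPut and incrementColumnValue contracts spelled out above are usually exercised through the HTable client rather than by calling HRegionInterface directly. A hedged sketch, assuming the era's HTable methods and made-up table, row, and column names:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndPutSketch {
      public static void main(String[] args) throws Exception {
        HTable table = new HTable(new HBaseConfiguration(), "example");

        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("info");

        // A null expected value means "check for non-existence", per the
        // javadoc above: the put only succeeds if the cell is still empty.
        Put put = new Put(row);
        put.add(family, Bytes.toBytes("state"), Bytes.toBytes("created"));
        boolean wrote = table.checkAndPut(row, family,
            Bytes.toBytes("state"), null, put);

        // Atomic counter; the stored cell must hold a long-like value.
        long hits = table.incrementColumnValue(row, family,
            Bytes.toBytes("hits"), 1L);

        System.out.println(wrote + " / " + hits);
      }
    }
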
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,12 +19,12 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;

+import java.io.IOException;
+
 /** Instantiated to add a column family to a table */
 class AddColumn extends ColumnOperation {
 private final HColumnDescriptor newColumn;

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,13 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;

@@ -52,6 +45,13 @@ import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.ipc.RemoteException;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+

 /**
 * Base HRegion scanner class. Holds utilty common to <code>ROOT</code> and

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,11 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.TreeMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;

@@ -32,6 +27,11 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Writables;

+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.TreeMap;
+
 /**
 * Instantiated to enable or disable a table
 */

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,8 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;

@@ -29,6 +27,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Writables;

+import java.io.IOException;
+
 abstract class ColumnOperation extends TableOperation {
 private final Log LOG = LogFactory.getLog(this.getClass());


@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,13 +19,13 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.Store;

+import java.io.IOException;
+
 /** Instantiated to remove a column family from a table */
 class DeleteColumn extends ColumnOperation {
 private final byte [] columnName;

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,25 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.io.File;
-import java.lang.management.ManagementFactory;
-import java.lang.management.RuntimeMXBean;
-import java.lang.reflect.Constructor;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.DelayQueue;
-import java.util.concurrent.PriorityBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;

@@ -58,9 +39,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.MiniZooKeeperCluster;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;

@@ -95,6 +76,25 @@ import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;
 import org.apache.zookeeper.Watcher.Event.KeeperState;

+import java.io.File;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.RuntimeMXBean;
+import java.lang.reflect.Constructor;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.DelayQueue;
+import java.util.concurrent.PriorityBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
 /**
 * HMaster is the "master server" for HBase. An HBase cluster has one active
 * master. If many masters are started, all compete. Whichever wins goes on to

@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,14 +19,14 @@
 */
 package org.apache.hadoop.hbase.master;

+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+
 import java.io.IOException;
-import java.util.List;
 import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
+import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
-
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import java.util.concurrent.TimeUnit;

 /**
 * MetaScanner <code>META</code> table.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,11 +19,12 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.HRegionInfo;
+
+import java.io.IOException;

 /** Instantiated to modify an existing column family on a table */
 class ModifyColumn extends ColumnOperation {

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,8 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;

@@ -31,6 +29,8 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;

+import java.io.IOException;
+
 /** Instantiated to modify table descriptor metadata */
 class ModifyTableMeta extends TableOperation {


@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,18 +19,18 @@
 */
 package org.apache.hadoop.hbase.master;

-import org.apache.hadoop.hbase.Chore;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;

+import java.io.IOException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.regex.Pattern;
-import java.io.IOException;

 /**
 * This Chore, everytime it runs, will clear the logs in the old logs folder

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,10 +19,10 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+
+import java.io.IOException;

 /**
 * ProcessRegionClose is the way we do post-processing on a closed region. We

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,8 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;
 import org.apache.hadoop.hbase.HServerInfo;

@@ -28,6 +26,8 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;

+import java.io.IOException;
+
 /**
 * ProcessRegionOpen is instantiated when a region server reports that it is
 * serving a region. This applies to all meta and user regions except the

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -23,17 +23,18 @@ import org.apache.hadoop.hbase.HRegionInfo;

 /**
 * Abstract class that performs common operations for
-* @see #ProcessRegionClose and @see #ProcessRegionOpen
+* @see ProcessRegionClose and @see ProcessRegionOpen
 */
 abstract class ProcessRegionStatusChange extends RegionServerOperation {
 protected final boolean isMetaTable;
 protected final HRegionInfo regionInfo;
+@SuppressWarnings({"FieldCanBeLocal"})
 private volatile MetaRegion metaRegion = null;
 protected volatile byte[] metaRegionName = null;

 /**
-* @param master
-* @param regionInfo
+* @param master the master
+* @param regionInfo region info
 */
 public ProcessRegionStatusChange(HMaster master, HRegionInfo regionInfo) {
 super(master);

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,12 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerAddress;

@@ -33,10 +27,15 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
 /**
 * Instantiated when a server's lease has expired, meaning it has crashed.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,23 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;

@@ -57,6 +40,23 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
 /**
 * Class to manage assigning regions to servers, state of root and meta, etc.
 */

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,14 +19,14 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.util.concurrent.Delayed;
-import java.util.concurrent.TimeUnit;
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;

+import java.io.IOException;
+import java.util.concurrent.Delayed;
+import java.util.concurrent.TimeUnit;
+
 abstract class RegionServerOperation implements Delayed, HConstants {
 protected static final Log LOG =
 LogFactory.getLog(RegionServerOperation.class.getName());

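RegionServerOperation implements java.util.concurrent.Delayed, which is what lets the master park retryable operations in the DelayQueue imported by HMaster above and take them out only once they come due. A self-contained illustration of that JDK contract (not HBase code; all names invented):

    import java.util.concurrent.DelayQueue;
    import java.util.concurrent.Delayed;
    import java.util.concurrent.TimeUnit;

    class DelayedOp implements Delayed {
      private final long expireAtMillis;

      DelayedOp(long delayMillis) {
        this.expireAtMillis = System.currentTimeMillis() + delayMillis;
      }

      public long getDelay(TimeUnit unit) {
        // Remaining wait; the queue hands the element out once this hits zero.
        return unit.convert(expireAtMillis - System.currentTimeMillis(),
            TimeUnit.MILLISECONDS);
      }

      public int compareTo(Delayed other) {
        long diff = getDelay(TimeUnit.MILLISECONDS)
            - other.getDelay(TimeUnit.MILLISECONDS);
        return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
      }

      public static void main(String[] args) throws InterruptedException {
        DelayQueue<DelayedOp> queue = new DelayQueue<DelayedOp>();
        queue.put(new DelayedOp(100));
        queue.take(); // blocks roughly 100 ms, then returns the operation
      }
    }
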
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -20,21 +20,21 @@

 package org.apache.hadoop.hbase.master;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.RemoteExceptionHandler;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Sleeper;
-import org.apache.hadoop.ipc.RemoteException;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Callable;

+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Sleeper;
+
 /**
 * Uses Callable pattern so that operations against meta regions do not need
 * to duplicate retry logic.

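The class comment above describes wrapping operations against meta regions in a Callable so retry logic lives in one place instead of being duplicated at every call site. A generic sketch of that pattern (all names hypothetical; this is not the class's real shape):

    import java.util.concurrent.Callable;

    final class RetrySketch {
      // Runs the Callable up to maxRetries times, sleeping between attempts,
      // and rethrows the last failure once the attempts are exhausted.
      static <T> T callWithRetries(Callable<T> op, int maxRetries, long sleepMs)
          throws Exception {
        Exception last = null;
        for (int attempt = 0; attempt < maxRetries; attempt++) {
          try {
            return op.call();
          } catch (Exception e) {
            last = e;              // remember the failure and try again
            Thread.sleep(sleepMs); // simple fixed back-off
          }
        }
        throw last != null ? last
            : new IllegalArgumentException("maxRetries must be at least 1");
      }
    }
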
@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,11 +19,11 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;

+import java.io.IOException;
+
 /** Scanner for the <code>ROOT</code> HRegion. */
 class RootScanner extends BaseScanner {
 /**

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,18 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;

@@ -52,6 +40,18 @@ import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
 /**
 * The ServerManager class manages info about region servers - HServerInfo,
 * load numbers, dying servers, etc.

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,8 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;

@@ -31,6 +29,8 @@ import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;

+import java.io.IOException;
+
 /**
 * Instantiated to delete a table. Table must be offline.
 */

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,12 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HServerInfo;

@@ -36,6 +30,12 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.ipc.HRegionInterface;
 import org.apache.hadoop.hbase.util.Bytes;

+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
 /**
 * Abstract base class for operations that need to examine all HRegionInfo
 * objects in a table. (For a table, operate on each of its rows

@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,8 +19,6 @@
 */
 package org.apache.hadoop.hbase.master;

-import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HServerAddress;

@@ -29,6 +27,8 @@ import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.Watcher.Event.EventType;

+import java.util.concurrent.atomic.AtomicBoolean;
+

 /**
 * ZooKeeper watcher for the master address. Also watches the cluster state

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -29,7 +29,7 @@ import java.io.IOException;
 public interface ChangedReadersObserver {
 /**
 * Notify observers.
-* @throws IOException
+* @throws IOException e
 */
 void updateReaders() throws IOException;
 }
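ChangedReadersObserver is a single-callback interface: code that holds open readers registers as an observer and is notified when the underlying reader set changes, for example after a flush or compaction. A hypothetical implementer, only to show the shape of the contract:

    import java.io.IOException;

    import org.apache.hadoop.hbase.regionserver.ChangedReadersObserver;

    // Invented class: it merely records that its readers are stale so the
    // next read can reopen them; real implementations would swap in the
    // store's new readers inside updateReaders().
    class ReopeningScanner implements ChangedReadersObserver {
      private volatile boolean staleReaders = false;

      public void updateReaders() throws IOException {
        staleReaders = true;
      }

      boolean needsReopen() {
        return staleReaders;
      }
    }
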
@@ -1,5 +1,5 @@
 /**
- * Copyright 2009 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2008 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,13 +19,6 @@
 */
 package org.apache.hadoop.hbase.regionserver;

-import java.io.IOException;
-import java.util.HashSet;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantLock;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;

@@ -37,6 +30,13 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.util.StringUtils;

+import java.io.IOException;
+import java.util.HashSet;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantLock;
+
 /**
 * Compact region on request and then run split if appropriate
 */

@@ -1,5 +1,5 @@
-/*
- * Copyright 2009 The Apache Software Foundation
+/**
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
-/*
- * Copyright 2009 The Apache Software Foundation
+/**
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
 /**
- * Copyright 2007 The Apache Software Foundation
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -1,5 +1,5 @@
-/**
- * Copyright 2009 The Apache Software Foundation
+/**
+ * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file

@@ -19,53 +19,53 @@
 */
 package org.apache.hadoop.hbase.regionserver;

-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.filter.Filter;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.Reference.Range;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.ipc.HRegionInterface;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.util.Progressable;
-import org.apache.hadoop.util.StringUtils;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.DroppedSnapshotException;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.Reference.Range;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.ipc.HRegionInterface;
+import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.StringUtils;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
 /**
 * HRegion stores data for a certain region of a table. It stores all columns

@@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -19,6 +19,65 @@
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HMsg.Type;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LeaseListener;
import org.apache.hadoop.hbase.Leases;
import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.MultiPut;
import org.apache.hadoop.hbase.client.MultiPutResponse;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ServerConnection;
import org.apache.hadoop.hbase.client.ServerConnectionManager;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;

import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.management.ManagementFactory;
@@ -50,65 +109,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HServerLoad;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.LeaseListener;
import org.apache.hadoop.hbase.Leases;
import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.HMsg.Type;
import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.ServerConnection;
import org.apache.hadoop.hbase.client.ServerConnectionManager;
import org.apache.hadoop.hbase.client.MultiPutResponse;
import org.apache.hadoop.hbase.client.MultiPut;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
import org.apache.hadoop.hbase.ipc.HBaseRPCProtocolVersion;
import org.apache.hadoop.hbase.ipc.HBaseServer;
import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.Watcher.Event.KeeperState;

/**
 * HRegionServer makes a set of HRegions available to clients. It checks in with
 * the HMaster. There are many HRegionServers in a single HBase deployment.
@@ -1,5 +1,5 @@
/**
 * Copyright 2009 The Apache Software Foundation
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -19,12 +19,12 @@
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.KeyValue;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.KeyValue;

/**
 * Internal scanners differ from client-side scanners in that they operate on
 * HStoreKeys and byte[] instead of RowResults. This is because they are
@@ -42,19 +42,19 @@ import org.apache.hadoop.hbase.KeyValue;
public interface InternalScanner extends Closeable {
  /**
   * Grab the next row's worth of values.
   * @param results
   * @param results return output array
   * @return true if more rows exist after this one, false if scanner is done
   * @throws IOException
   * @throws IOException e
   */
  public boolean next(List<KeyValue> results) throws IOException;

  /**
   * Grab the next row's worth of values with a limit on the number of values
   * to return.
   * @param result
   * @param limit
   * @param result return output array
   * @param limit limit on row count to get
   * @return true if more rows exist after this one, false if scanner is done
   * @throws IOException
   * @throws IOException e
   */
  public boolean next(List<KeyValue> result, int limit) throws IOException;
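A typical consumer drains an InternalScanner one row at a time, reusing a single
result buffer across calls. The loop below is an illustrative sketch only, not
part of this commit; the `scanner` variable is an assumed InternalScanner
instance obtained elsewhere:

// Minimal drain loop over an InternalScanner (illustrative; `scanner` assumed).
List<KeyValue> results = new ArrayList<KeyValue>();
try {
  boolean more;
  do {
    results.clear();                // next() appends into the caller's list
    more = scanner.next(results);   // one row's worth of KeyValues per call
    for (KeyValue kv : results) {
      // process kv ...
    }
  } while (more);
} finally {
  scanner.close();                  // InternalScanner extends Closeable
}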
@@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -20,14 +20,14 @@

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;

import java.io.IOException;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;

/**
 * Implements a heap merge across any number of KeyValueScanners.
 * <p>
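The heap-merge idea named in the Javadoc fragment above can be shown with a
small generic sketch: keep every non-exhausted scanner in a priority queue
ordered by its current head element, then repeatedly poll the smallest scanner,
consume one element, and re-insert it. This illustrates the technique only; the
SortedScanner shape and HeapMerge class are hypothetical stand-ins, not
KeyValueHeap's actual code.

import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

/** Hypothetical sorted-scanner shape; KeyValueHeap's real API differs. */
interface SortedScanner<T> {
  T peek();   // current head element, or null when exhausted
  T next();   // return the head element and advance past it
}

final class HeapMerge<T> {
  private final PriorityQueue<SortedScanner<T>> heap;

  HeapMerge(List<SortedScanner<T>> scanners, final Comparator<T> cmp) {
    heap = new PriorityQueue<SortedScanner<T>>(Math.max(1, scanners.size()),
        new Comparator<SortedScanner<T>>() {
          public int compare(SortedScanner<T> a, SortedScanner<T> b) {
            return cmp.compare(a.peek(), b.peek()); // order scanners by head
          }
        });
    for (SortedScanner<T> s : scanners) {
      if (s.peek() != null) {  // seed only non-empty scanners
        heap.add(s);
      }
    }
  }

  /** Next smallest element across all scanners, or null when done. */
  T next() {
    SortedScanner<T> top = heap.poll();
    if (top == null) {
      return null;
    }
    T result = top.next();
    if (top.peek() != null) {
      heap.add(top);  // re-insert so the heap re-orders on the new head
    }
    return result;
  }
}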
@@ -1,5 +1,5 @@
/*
 * Copyright 2009 The Apache Software Foundation
/**
 * Copyright 2010 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
@@ -39,7 +39,7 @@ public interface KeyValueScanner {

  /**
   * Seek the scanner at or after the specified KeyValue.
   * @param key
   * @param key seek value
   * @return true if scanner has values left, false if end of scanner
   */
  public boolean seek(KeyValue key);
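The seek contract above suggests the usual calling pattern: position the
scanner first, then iterate only if seek() reported values remaining. A hedged
sketch follows; the `startKey` value and the `next()` accessor used alongside
seek() are assumptions for illustration, not shown in this hunk:

// Position the scanner at or after startKey, then walk forward.
if (scanner.seek(startKey)) {
  for (KeyValue kv = scanner.next(); kv != null; kv = scanner.next()) {
    // kv sorts at or after startKey ...
  }
}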