HBASE-12404 Task 5 from parent: Replace internal HTable constructor use with HConnection#getTable (0.98, 0.99)

Replaced HTable under hbase-*/src/main/java. Skipped tests; it would take
till the end of time to do them all, and some cases are cryptic. Also skipped
some mapreduce code where HTable comes through in the API. Both of these
stragglers can be done in another issue.
Generally, if a class is a utility or standalone class, I tried to pass in a
Connection rather than have the class create its own connection on each
invocation; e.g. the Quota classes. Where that was not possible, I noted where
the invocation comes from; if it was a test or hbck, I didn't worry about it.
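The shape of that change, modeled on the QuotaTableUtil.doGet edit further down
(a hedged sketch, not the verbatim patch; imports elided, and QUOTA_TABLE_NAME
is the quota table's TableName constant):

  // Before: the helper builds and tears down its own table on every call.
  static Result doGet(final Configuration conf, final Get get) throws IOException {
    HTable table = new HTable(conf, QUOTA_TABLE_NAME);
    try {
      return table.get(get);
    } finally {
      table.close();
    }
  }

  // After: the caller owns the Connection; the helper only borrows a Table from it.
  static Result doGet(final Connection connection, final Get get) throws IOException {
    try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
      return table.get(get);
    }
  }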
Some classes are just standalone, and nothing can be done to avoid
a Connection setup per invocation (this is probably how it worked
in the new-HTable-on-every-use days anyway). Some classes are not used at all:
AggregationClient, FavoredNodes... we should just purge this stuff.
Added doc on what the short-circuit connection does (I can just use it...
I thought it was only for short-circuiting, but no, it switches depending
on where you are connecting).
Changed HConnection to the internal interface ClusterConnection where safe
(internal usage by private classes only).
Cleaned up the doc examples so they show the new style rather than the old
fashion.
Used the Java 7 try-with-resources idiom, which lets you avoid writing out a
finally block to call close on implementations of Closeable.
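For example, table access in the new style reads roughly like this (a minimal
sketch, not a line from this patch; imports elided, assumes a Configuration
named conf, an existing table "t", and a method declared to throw IOException):

  // Connection and Table are both Closeable, so try-with-resources closes them;
  // no hand-written finally blocks needed.
  try (Connection connection = ConnectionFactory.createConnection(conf);
       Table table = connection.getTable(TableName.valueOf("t"))) {
    Result r = table.get(new Get(Bytes.toBytes("somerow")));
    // ... use r ...
  }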
Added a RegistryFactory; moved the registry lookup out into its own class
instead of keeping it inline in ConnectionManager.
Added a utility createGetClosestRowOrBeforeReverseScan method to Scan that
creates a small reversed Scan from the passed row for finding the next
closest row (it replaces the old getRowOrBefore call in MetaScanner).
Renamed getShortCircuitConnection as getConnection; users don't need to know
what the implementation does (that it can short-circuit RPC).
The old name gave pause: I was frightened to use it, thinking it was only for
short-circuit reading and that it would not do remote calls too.
Squashed commit of the following:
stack 2014-11-25 08:15:20 -08:00
parent f2be914f73
commit e6b4300756
111 changed files with 2071 additions and 1859 deletions


@@ -34,11 +34,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -173,13 +175,18 @@ public class MetaTableAccessor {
  * @throws IOException
  * @SuppressWarnings("deprecation")
  */
-private static Table getHTable(final Connection connection,
-    final TableName tableName)
+private static Table getHTable(final Connection connection, final TableName tableName)
 throws IOException {
   // We used to pass whole CatalogTracker in here, now we just pass in Connection
   if (connection == null || connection.isClosed()) {
     throw new NullPointerException("No connection");
   }
+  // If the passed in 'connection' is 'managed' -- i.e. every second test uses
+  // an HTable or an HBaseAdmin with managed connections -- then doing
+  // connection.getTable will throw an exception saying you are NOT to use
+  // managed connections getting tables. Leaving this as it is for now. Will
+  // revisit when inclined to change all tests. User code probably makes use of
+  // managed connections too so don't change it till post hbase 1.0.
   return new HTable(tableName, connection);
 }
@@ -216,8 +223,7 @@ public class MetaTableAccessor {
  * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead
  */
 @Deprecated
-public static Pair<HRegionInfo, ServerName> getRegion(
-    Connection connection, byte [] regionName)
+public static Pair<HRegionInfo, ServerName> getRegion(Connection connection, byte [] regionName)
 throws IOException {
   HRegionLocation location = getRegionLocation(connection, regionName);
   return location == null
@@ -887,11 +893,23 @@ public class MetaTableAccessor {
  */
 public static int getRegionCount(final Configuration c, final TableName tableName)
 throws IOException {
-  HTable t = new HTable(c, tableName);
-  try {
-    return t.getRegionLocations().size();
-  } finally {
-    t.close();
+  try (Connection connection = ConnectionFactory.createConnection(c)) {
+    return getRegionCount(connection, tableName);
+  }
+}
+
+/**
+ * Count regions in <code>hbase:meta</code> for passed table.
+ * @param connection Connection object
+ * @param tableName table name to count regions for
+ * @return Count of regions in table <code>tableName</code>
+ * @throws IOException
+ */
+public static int getRegionCount(final Connection connection, final TableName tableName)
+throws IOException {
+  try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+    List<HRegionLocation> locations = locator.getAllRegionLocations();
+    return locations == null? 0: locations.size();
   }
 }


@@ -36,21 +36,18 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
 /**
- * An internal class that adapts a {@link HConnection}.
- * HConnection is created from HConnectionManager. The default
- * implementation talks to region servers over RPC since it
- * doesn't know if the connection is used by one region server
- * itself. This adapter makes it possible to change some of the
- * default logic. Especially, when the connection is used
- * internally by some the region server.
+ * An internal class that delegates to an {@link HConnection} instance.
+ * A convenience to override when customizing method implementations.
+ *
  *
  * @see ConnectionUtils#createShortCircuitHConnection(HConnection, ServerName,
- * AdminService.BlockingInterface, ClientService.BlockingInterface)
+ * AdminService.BlockingInterface, ClientService.BlockingInterface) for case where we make
+ * Connections skip RPC if request is to local server.
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 //NOTE: DO NOT make this class public. It was made package-private on purpose.
-class ConnectionAdapter implements ClusterConnection {
+abstract class ConnectionAdapter implements ClusterConnection {
 private final ClusterConnection wrappedConnection;


@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
 * A non-instantiable class that manages creation of {@link Connection}s.
 * Managing the lifecycle of the {@link Connection}s to the cluster is the responsibility of
 * the caller.
- * From this {@link Connection} {@link Table} implementations are retrieved
+ * From a {@link Connection}, {@link Table} implementations are retrieved
 * with {@link Connection#getTable(TableName)}. Example:
 * <pre>
 * Connection connection = ConnectionFactory.createConnection(config);


@@ -180,7 +180,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 /**
- * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
+ * An internal, non-instantiable class that manages creation of {@link HConnection}s.
 */
 @SuppressWarnings("serial")
 @InterfaceAudience.Private
@@ -774,16 +774,7 @@ class ConnectionManager {
  * @throws IOException
  */
 private Registry setupRegistry() throws IOException {
-  String registryClass = this.conf.get("hbase.client.registry.impl",
-    ZooKeeperRegistry.class.getName());
-  Registry registry = null;
-  try {
-    registry = (Registry)Class.forName(registryClass).newInstance();
-  } catch (Throwable t) {
-    throw new IOException(t);
-  }
-  registry.init(this);
-  return registry;
+  return RegistryFactory.getRegistry(this);
 }
 /**
@@ -1010,8 +1001,8 @@ class ConnectionManager {
 @Override
 public List<HRegionLocation> locateRegions(final TableName tableName,
   final boolean useCache, final boolean offlined) throws IOException {
-  NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
-    tableName);
+  NavigableMap<HRegionInfo, ServerName> regions =
+    MetaScanner.allTableRegions(conf, this, tableName);
   final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
   for (HRegionInfo regionInfo : regions.keySet()) {
   RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);


@@ -104,14 +104,14 @@ public class ConnectionUtils {
 /**
 * Adapt a HConnection so that it can bypass the RPC layer (serialization,
- * deserialization, networking, etc..) when it talks to a local server.
+ * deserialization, networking, etc..) -- i.e. short-circuit -- when talking to a local server.
 * @param conn the connection to adapt
 * @param serverName the local server name
 * @param admin the admin interface of the local server
 * @param client the client interface of the local server
 * @return an adapted/decorated HConnection
 */
-public static HConnection createShortCircuitHConnection(final Connection conn,
+public static ClusterConnection createShortCircuitHConnection(final Connection conn,
   final ServerName serverName, final AdminService.BlockingInterface admin,
   final ClientService.BlockingInterface client) {
   return new ConnectionAdapter(conn) {


@@ -80,24 +80,24 @@ import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 /**
- *
- * HTable is no longer a client API. It is marked InterfaceAudience.Private indicating that
- * this is an HBase-internal class as defined in
- * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html
- * There are no guarantees for backwards source / binary compatibility and methods or class can
- * change or go away without deprecation. Use {@link Connection#getTable(TableName)}
- * to obtain an instance of {@link Table} instead of constructing an HTable directly.
- * <p>An implementation of {@link Table}. Used to communicate with a single HBase table.
+ * An implementation of {@link Table}. Used to communicate with a single HBase table.
 * Lightweight. Get as needed and just close when done.
 * Instances of this class SHOULD NOT be constructed directly.
 * Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
 * class comment for an example of how.
 *
- * <p>This class is NOT thread safe for reads nor write.
+ * <p>This class is NOT thread safe for reads nor writes.
 * In the case of writes (Put, Delete), the underlying write buffer can
 * be corrupted if multiple threads contend over a single HTable instance.
 * In the case of reads, some fields used by a Scan are shared among all threads.
 *
+ * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
+ * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
+ * <a href="https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html">Hadoop
+ * Interface Classification</a>
+ * There are no guarantees for backwards source / binary compatibility and methods or class can
+ * change or go away without deprecation.
+ *
 * @see Table
 * @see Admin
 * @see Connection
@@ -163,8 +163,6 @@ public class HTable implements HTableInterface, RegionLocator {
   this(conf, TableName.valueOf(tableName));
 }
 /**
 * Creates an object to access a HBase table.
 * @param conf Configuration object to use.
@@ -291,6 +289,8 @@ public class HTable implements HTableInterface, RegionLocator {
 /**
 * Creates an object to access a HBase table.
+ * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to
+ * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
 * @param tableName Name of the table.
 * @param connection HConnection to be used.
 * @param pool ExecutorService to be used.
@@ -1793,20 +1793,6 @@ public class HTable implements HTableInterface, RegionLocator {
   return tableName + ";" + connection;
 }
-/**
- * Run basic test.
- * @param args Pass table name and row and will get the content.
- * @throws IOException
- */
-public static void main(String[] args) throws IOException {
-  Table t = new HTable(HBaseConfiguration.create(), args[0]);
-  try {
-    System.out.println(t.get(new Get(Bytes.toBytes(args[1]))));
-  } finally {
-    t.close();
-  }
-}
 /**
 * {@inheritDoc}
 */


@@ -41,6 +41,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
+import com.google.common.annotations.VisibleForTesting;
 /**
 * Scanner class that contains the <code>hbase:meta</code> table scanning logic.
 * Provided visitors will be called for each row.
@@ -60,12 +62,15 @@ public class MetaScanner {
 * Scans the meta table and calls a visitor on each RowResult and uses a empty
 * start row value as table name.
 *
+ * <p>Visible for testing. Use {@link
+ * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
+ *
 * @param configuration conf
 * @param visitor A custom visitor
 * @throws IOException e
 */
-public static void metaScan(Configuration configuration,
-  MetaScannerVisitor visitor)
+@VisibleForTesting // Do not use. Used by tests only and hbck.
+public static void metaScan(Configuration configuration, MetaScannerVisitor visitor)
 throws IOException {
   metaScan(configuration, visitor, null, null, Integer.MAX_VALUE);
 }
@@ -92,6 +97,9 @@ public class MetaScanner {
 * name and a row name to locate meta regions. And it only scans at most
 * <code>rowLimit</code> of rows.
 *
+ * <p>Visible for testing. Use {@link
+ * #metaScan(Configuration, Connection, MetaScannerVisitor, TableName)} instead.
+ *
 * @param configuration HBase configuration.
 * @param visitor Visitor object.
 * @param userTableName User table name in meta table to start scan at. Pass
@@ -102,12 +110,12 @@ public class MetaScanner {
 * will be set to default value <code>Integer.MAX_VALUE</code>.
 * @throws IOException e
 */
+@VisibleForTesting // Do not use. Used by Master but by a method that is used testing.
 public static void metaScan(Configuration configuration,
   MetaScannerVisitor visitor, TableName userTableName, byte[] row,
   int rowLimit)
 throws IOException {
-  metaScan(configuration, null, visitor, userTableName, row, rowLimit,
-    TableName.META_TABLE_NAME);
+  metaScan(configuration, null, visitor, userTableName, row, rowLimit, TableName.META_TABLE_NAME);
 }
 /**
@@ -141,25 +149,24 @@ public class MetaScanner {
 int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
 // Calculate startrow for scan.
 byte[] startRow;
-ResultScanner scanner = null;
-HTable metaTable = null;
-try {
-  metaTable = new HTable(TableName.META_TABLE_NAME, connection, null);
+// If the passed in 'connection' is 'managed' -- i.e. every second test uses
+// an HTable or an HBaseAdmin with managed connections -- then doing
+// connection.getTable will throw an exception saying you are NOT to use
+// managed connections getting tables. Leaving this as it is for now. Will
+// revisit when inclined to change all tests. User code probably makes use of
+// managed connections too so don't change it till post hbase 1.0.
+try (Table metaTable = new HTable(TableName.META_TABLE_NAME, connection, null)) {
   if (row != null) {
     // Scan starting at a particular row in a particular table
-    byte[] searchRow = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
-    Result startRowResult = metaTable.getRowOrBefore(searchRow, HConstants.CATALOG_FAMILY);
+    Result startRowResult = getClosestRowOrBefore(metaTable, tableName, row);
     if (startRowResult == null) {
-      throw new TableNotFoundException("Cannot find row in "+ TableName
-        .META_TABLE_NAME.getNameAsString()+" for table: "
-        + tableName + ", row=" + Bytes.toStringBinary(searchRow));
+      throw new TableNotFoundException("Cannot find row in " + metaTable.getName() +
+        " for table: " + tableName + ", row=" + Bytes.toStringBinary(row));
     }
     HRegionInfo regionInfo = getHRegionInfo(startRowResult);
     if (regionInfo == null) {
       throw new IOException("HRegionInfo was null or empty in Meta for " +
-        tableName + ", row=" + Bytes.toStringBinary(searchRow));
+        tableName + ", row=" + Bytes.toStringBinary(row));
     }
     byte[] rowBefore = regionInfo.getStartKey();
     startRow = HRegionInfo.createRegionName(tableName, rowBefore, HConstants.ZEROES, false);
@@ -184,25 +191,18 @@ public class MetaScanner {
     Bytes.toStringBinary(startRow) + " for max=" + rowUpperLimit + " with caching=" + rows);
   }
   // Run the scan
-  scanner = metaTable.getScanner(scan);
+  try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
   Result result;
   int processedRows = 0;
-  while ((result = scanner.next()) != null) {
+  while ((result = resultScanner.next()) != null) {
     if (visitor != null) {
       if (!visitor.processRow(result)) break;
     }
    processedRows++;
    if (processedRows >= rowUpperLimit) break;
   }
+  }
 } finally {
-  if (scanner != null) {
-    try {
-      scanner.close();
-    } catch (Throwable t) {
-      ExceptionUtil.rethrowIfInterrupt(t);
-      LOG.debug("Got exception in closing the result scanner", t);
-    }
-  }
   if (visitor != null) {
     try {
       visitor.close();
@@ -211,20 +211,26 @@ public class MetaScanner {
      LOG.debug("Got exception in closing the meta scanner visitor", t);
     }
   }
-  if (metaTable != null) {
-    try {
-      metaTable.close();
-    } catch (Throwable t) {
-      ExceptionUtil.rethrowIfInterrupt(t);
-      LOG.debug("Got exception in closing meta table", t);
-    }
-  }
   if (closeConnection) {
-    connection.close();
+    if (connection != null) connection.close();
   }
 }
 }
+/**
+ * @return Get closest metatable region row to passed <code>row</code>
+ * @throws IOException
+ */
+private static Result getClosestRowOrBefore(final Table metaTable, final TableName userTableName,
+    final byte [] row)
+throws IOException {
+  byte[] searchRow = HRegionInfo.createRegionName(userTableName, row, HConstants.NINES, false);
+  Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(searchRow);
+  try (ResultScanner resultScanner = metaTable.getScanner(scan)) {
+    return resultScanner.next();
+  }
+}
 /**
 * Returns HRegionInfo object from the column
 * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog
@@ -246,6 +252,7 @@ public class MetaScanner {
 * @return List of all user-space regions.
 * @throws IOException
 */
+@VisibleForTesting // And for hbck.
 public static List<HRegionInfo> listAllRegions(Configuration conf, final boolean offlined)
 throws IOException {
   final List<HRegionInfo> regions = new ArrayList<HRegionInfo>();


@@ -20,11 +20,14 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import org.apache.hadoop.hbase.RegionLocations;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 /**
 * Cluster registry.
- * Implemenations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Implementations hold cluster information such as this cluster's id, location of hbase:meta, etc.
+ * Internal use only.
 */
+@InterfaceAudience.Private
 interface Registry {
 /**
 * @param connection


@@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* Get instance of configured Registry.
*/
@InterfaceAudience.Private
class RegistryFactory {
/**
* @return The cluster registry implementation to use.
* @throws IOException
*/
static Registry getRegistry(final Connection connection)
throws IOException {
String registryClass = connection.getConfiguration().get("hbase.client.registry.impl",
ZooKeeperRegistry.class.getName());
Registry registry = null;
try {
registry = (Registry)Class.forName(registryClass).newInstance();
} catch (Throwable t) {
throw new IOException(t);
}
registry.init(connection);
return registry;
}
}


@@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 * To scan everything for each row, instantiate a Scan object.
 * <p>
 * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
- * If caching is NOT set, we will use the caching value of the hosting {@link HTable}. See
- * {@link HTable#setScannerCaching(int)}. In addition to row caching, it is possible to specify a
+ * If caching is NOT set, we will use the caching value of the hosting {@link Table}.
+ * In addition to row caching, it is possible to specify a
 * maximum result size, using {@link #setMaxResultSize(long)}. When both are used,
 * single server requests are limited by either number of rows or maximum result size, whichever
 * limit comes first.
@@ -478,7 +478,8 @@ public class Scan extends Query {
 /**
 * Set the number of rows for caching that will be passed to scanners.
- * If not set, the default setting from {@link HTable#getScannerCaching()} will apply.
+ * If not set, the Configuration setting {@link HConstants#HBASE_CLIENT_SCANNER_CACHING} will
+ * apply.
 * Higher caching values will enable faster scanners but will use more memory.
 * @param caching the number of rows for caching
 */
@@ -894,4 +895,21 @@ public class Scan extends Query {
   return (Scan) super.setIsolationLevel(level);
 }
+/**
+ * Utility that creates a Scan that will do a small scan in reverse from passed row
+ * looking for next closest row.
+ * @param row
+ * @param family
+ * @return An instance of Scan primed with passed <code>row</code> and <code>family</code> to
+ * scan in reverse for one row only.
+ */
+static Scan createGetClosestRowOrBeforeReverseScan(byte[] row) {
+  // Below does not work if you add in family; need to add the family qualifier that is highest
+  // possible family qualifier. Do we have such a notion? Would have to be magic.
+  Scan scan = new Scan(row);
+  scan.setSmall(true);
+  scan.setReversed(true);
+  scan.setCaching(1);
+  return scan;
+}
 }


@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.client.coprocessor;
+import java.io.Closeable;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
@@ -72,19 +74,32 @@ import com.google.protobuf.Message;
 * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
 * parameter type. For average and std, it returns a double value. For row
 * count, it returns a long value.
+ * <p>Call {@link #close()} when done.
 */
 @InterfaceAudience.Private
-public class AggregationClient {
+public class AggregationClient implements Closeable {
+  // TODO: This class is not used. Move to examples?
 private static final Log log = LogFactory.getLog(AggregationClient.class);
-Configuration conf;
+private final Connection connection;
 /**
 * Constructor with Conf object
 * @param cfg
 */
 public AggregationClient(Configuration cfg) {
-  this.conf = cfg;
+  try {
+    // Create a connection on construction. Will use it making each of the calls below.
+    this.connection = ConnectionFactory.createConnection(cfg);
+  } catch (IOException e) {
+    throw new RuntimeException(e);
+  }
+}
+@Override
+public void close() throws IOException {
+  if (this.connection != null && !this.connection.isClosed()) {
+    this.connection.close();
+  }
 }
 /**
@@ -102,14 +117,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message> R max(
   final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
 throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
     return max(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
   }
 }
@@ -197,14 +206,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message> R min(
   final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
 throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return min(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }
@@ -277,14 +280,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message> long rowCount(
   final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
 throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return rowCount(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }
@@ -351,14 +348,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message> S sum(
   final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
 throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return sum(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }
@@ -424,14 +415,8 @@ public class AggregationClient {
 private <R, S, P extends Message, Q extends Message, T extends Message> Pair<S, Long> getAvgArgs(
   final TableName tableName, final ColumnInterpreter<R, S, P, Q, T> ci, final Scan scan)
 throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return getAvgArgs(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }
@@ -615,14 +600,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message>
 double std(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
   Scan scan) throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return std(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }
@@ -728,14 +707,8 @@ public class AggregationClient {
 public <R, S, P extends Message, Q extends Message, T extends Message>
 R median(final TableName tableName, ColumnInterpreter<R, S, P, Q, T> ci,
   Scan scan) throws Throwable {
-  Table table = null;
-  try {
-    table = new HTable(conf, tableName);
+  try (Table table = connection.getTable(tableName)) {
    return median(table, ci, scan);
-  } finally {
-    if (table != null) {
-      table.close();
-    }
  }
 }


@@ -50,9 +50,9 @@ must:
 method should return a reference to the Endpoint's protocol buffer Service instance.
 </ul>
 Clients may then call the defined service methods on coprocessor instances via
-the {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])},
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
+the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])},
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
 methods.
 </p>
@@ -65,21 +65,21 @@ to identify which regions should be used for the method invocations. Clients
 can call coprocessor Service methods against either:
 <ul>
 <li><strong>a single region</strong> - calling
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(byte[])}
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}
 with a single row key. This returns a {@link org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel}
 instance which communicates with the region containing the given row key (even if the
 row does not exist) as the RPC endpoint. Clients can then use the {@code CoprocessorRpcChannel}
 instance in creating a new Service stub to call RPC methods on the region's coprocessor.</li>
 <li><strong>a range of regions</strong> - calling
-{@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
-or {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
+{@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
+or {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call, org.apache.hadoop.hbase.client.coprocessor.Batch.Callback)}
 with a starting row key and an ending row key. All regions in the table
 from the region containing the start row key to the region containing the end
 row key (inclusive), will we used as the RPC endpoints.</li>
 </ul>
 </p>
-<p><em>Note that the row keys passed as parameters to the <code>HTable</code>
+<p><em>Note that the row keys passed as parameters to the <code>Table</code>
 methods are not passed directly to the coprocessor Service implementations.
 They are only used to identify the regions for endpoints of the remote calls.
 </em></p>
@@ -160,7 +160,8 @@ use:
 <div style="background-color: #cccccc; padding: 2px">
 <blockquote><pre>
-HTable table = new HTable(conf, "mytable");
+Connection connection = ConnectionFactory.createConnection(conf);
+Table table = connection.getTable(TableName.valueOf("mytable"));
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(
 ExampleProtos.RowCountService.class, // the protocol interface we're invoking
@@ -186,7 +187,7 @@ of <code>mytable</code>, keyed by the region name.
 By implementing {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call}
 as an anonymous class, we can invoke <code>RowCountService</code> methods
 directly against the {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)}
-method's argument. Calling {@link org.apache.hadoop.hbase.client.HTable#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
+method's argument. Calling {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}
 will take care of invoking <code>Batch.Call.call()</code> against our anonymous class
 with the <code>RowCountService</code> instance for each table region.
 </p>
@@ -199,7 +200,8 @@ like to combine row count and key-value count for each region:
 <div style="background-color: #cccccc; padding: 2px">
 <blockquote><pre>
-HTable table = new HTable(conf, "mytable");
+Connection connection = ConnectionFactory.createConnection(conf);
+Table table = connection.getTable(TableName.valueOf("mytable"));
 // combine row count and kv count for region
 final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
 Map<byte[],Long> results = table.coprocessorService(


@@ -28,23 +28,26 @@ Provides HBase Client
 <h2><a name="overview">Overview</a></h2>
 <p>To administer HBase, create and drop tables, list and alter tables,
-use {@link org.apache.hadoop.hbase.client.HBaseAdmin}. Once created, table access is via an instance
-of {@link org.apache.hadoop.hbase.client.HTable}. You add content to a table a row at a time. To insert,
-create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value, target column
-and optionally a timestamp. Commit your update using {@link org.apache.hadoop.hbase.client.HTable#put(Put)}.
-To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be specified to be broad -- get all
-on a particular row -- or narrow; i.e. return only a single cell value. After creating an instance of
-Get, invoke {@link org.apache.hadoop.hbase.client.HTable#get(Get)}. Use
-{@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access. After
-creating and configuring your Scan instance, call {@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} and then
-invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.HTable#get(Get)} and
-{@link org.apache.hadoop.hbase.client.HTable#getScanner(Scan)} return a
+use {@link org.apache.hadoop.hbase.client.Admin}. Once created, table access is via an instance
+of {@link org.apache.hadoop.hbase.client.Table}. You add content to a table a row at a time. To
+insert, create an instance of a {@link org.apache.hadoop.hbase.client.Put} object. Specify value,
+target column and optionally a timestamp. Commit your update using
+{@link org.apache.hadoop.hbase.client.Table#put(Put)}.
+To fetch your inserted value, use {@link org.apache.hadoop.hbase.client.Get}. The Get can be
+specified to be broad -- get all on a particular row -- or narrow; i.e. return only a single cell
+value. After creating an instance of
+Get, invoke {@link org.apache.hadoop.hbase.client.Table#get(Get)}.
+<p>Use {@link org.apache.hadoop.hbase.client.Scan} to set up a scanner -- a Cursor- like access.
+After creating and configuring your Scan instance, call
+{@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} and then
+invoke next on the returned object. Both {@link org.apache.hadoop.hbase.client.Table#get(Get)}
+and {@link org.apache.hadoop.hbase.client.Table#getScanner(Scan)} return a
 {@link org.apache.hadoop.hbase.client.Result}.
-A Result is a List of {@link org.apache.hadoop.hbase.KeyValue}s. It has facility for packaging the return
-in different formats.
-Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
+<p>Use {@link org.apache.hadoop.hbase.client.Delete} to remove content.
 You can remove individual cells or entire families, etc. Pass it to
-{@link org.apache.hadoop.hbase.client.HTable#delete(Delete)} to execute.
+{@link org.apache.hadoop.hbase.client.Table#delete(Delete)} to execute.
 </p>
 <p>Puts, Gets and Deletes take out a lock on the target row for the duration of their operation.
 Concurrent modifications to a single row are serialized. Gets and scans run concurrently without
@@ -68,8 +71,11 @@ in different formats.
 import java.io.IOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -87,9 +93,18 @@ public class MyLittleHBaseClient {
 // be found on the CLASSPATH
 Configuration config = HBaseConfiguration.create();
-// This instantiates an HTable object that connects you to
-// the "myLittleHBaseTable" table.
-HTable table = new HTable(config, "myLittleHBaseTable");
+// Next you need a Connection to the cluster. Create one. When done with it,
+// close it (Should start a try/finally after this creation so it gets closed
+// for sure but leaving this out for readability's sake).
+Connection connection = ConnectionFactory.createConnection(config);
+try {
+  // This instantiates a Table object that connects you to
+  // the "myLittleHBaseTable" table (TableName.valueOf turns String into TableName instance).
+  // When done with it, close it (Should start a try/finally after this creation so it gets
+  // closed for sure but leaving this out for readability's sake).
+  Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"));
+  try {
 // To add to a row, use Put. A Put constructor takes the name of the row
 // you want to insert into as a byte array. In HBase, the Bytes class has
@@ -152,15 +167,23 @@ public class MyLittleHBaseClient {
   // Thats why we have it inside a try/finally clause
   scanner.close();
 }
 // Close your table and cluster connection.
+} finally {
+  if (table != null) table.close();
+}
+} finally {
+  connection.close();
+}
 }
 }
 </pre></blockquote>
 </div>
 <p>There are many other methods for putting data into and getting data out of
-HBase, but these examples should get you started. See the HTable javadoc for
+HBase, but these examples should get you started. See the Table javadoc for
 more methods. Additionally, there are methods for managing tables in the
-HBaseAdmin class.</p>
+Admin class.</p>
 <p>If your client is NOT Java, then you should consider the Thrift or REST
 libraries.</p>
@@ -168,20 +191,14 @@ public class MyLittleHBaseClient {
 <h2><a name="related" >Related Documentation</a></h2>
 <ul>
 <li><a href="http://hbase.org">HBase Home Page</a>
-<li><a href="http://wiki.apache.org/hadoop/Hbase">HBase Wiki</a>
 <li><a href="http://hadoop.apache.org/">Hadoop Home Page</a>
 </ul>
 </pre></code>
 </div>
-<p>There are many other methods for putting data into and getting data out of
-HBase, but these examples should get you started. See the HTable javadoc for
-more methods. Additionally, there are methods for managing tables in the
-HBaseAdmin class.</p>
 <p>See also the section in the HBase Reference Guide where it discusses
 <a href="http://hbase.apache.org/book.html#client">HBase Client</a>. It
-has section on how to access HBase from inside your multithreaded environtment
+has section on how to access HBase from inside your multithreaded environment
 how to control resources consumed client-side, etc.</p>
 </body>
 </html>


@@ -30,10 +30,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.util.StringUtils;
@@ -47,23 +49,40 @@ public class QuotaRetriever implements Closeable, Iterable<QuotaSettings> {
 private final Queue<QuotaSettings> cache = new LinkedList<QuotaSettings>();
 private ResultScanner scanner;
-private HTable table;
+/**
+ * Connection to use.
+ * Could pass one in and have this class use it but this class wants to be standalone.
+ */
+private Connection connection;
+private Table table;
 private QuotaRetriever() {
 }
 void init(final Configuration conf, final Scan scan) throws IOException {
-  table = new HTable(conf, QuotaTableUtil.QUOTA_TABLE_NAME);
+  this.connection = ConnectionFactory.createConnection(conf);
+  this.table = this.connection.getTable(QuotaTableUtil.QUOTA_TABLE_NAME);
   try {
     scanner = table.getScanner(scan);
   } catch (IOException e) {
-    table.close();
+    try {
+      close();
+    } catch (IOException ioe) {
+      LOG.warn("Failed getting scanner and then failed close on cleanup", e);
+    }
    throw e;
   }
 }
 public void close() throws IOException {
-  table.close();
+  if (this.table != null) {
+    this.table.close();
+    this.table = null;
+  }
+  if (this.connection != null) {
+    this.connection.close();
+    this.connection = null;
+  }
 }
 public QuotaSettings next() throws IOException {


@ -27,15 +27,15 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter; import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.FilterList;
@ -78,41 +78,42 @@ public class QuotaTableUtil {
/* ========================================================================= /* =========================================================================
* Quota "settings" helpers * Quota "settings" helpers
*/ */
public static Quotas getTableQuota(final Configuration conf, final TableName table) public static Quotas getTableQuota(final Connection connection, final TableName table)
throws IOException { throws IOException {
return getQuotas(conf, getTableRowKey(table)); return getQuotas(connection, getTableRowKey(table));
} }
public static Quotas getNamespaceQuota(final Configuration conf, final String namespace) public static Quotas getNamespaceQuota(final Connection connection, final String namespace)
throws IOException { throws IOException {
return getQuotas(conf, getNamespaceRowKey(namespace)); return getQuotas(connection, getNamespaceRowKey(namespace));
} }
public static Quotas getUserQuota(final Configuration conf, final String user) public static Quotas getUserQuota(final Connection connection, final String user)
throws IOException { throws IOException {
return getQuotas(conf, getUserRowKey(user)); return getQuotas(connection, getUserRowKey(user));
} }
public static Quotas getUserQuota(final Configuration conf, final String user, public static Quotas getUserQuota(final Connection connection, final String user,
final TableName table) throws IOException { final TableName table) throws IOException {
return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserTable(table)); return getQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table));
} }
public static Quotas getUserQuota(final Configuration conf, final String user, public static Quotas getUserQuota(final Connection connection, final String user,
final String namespace) throws IOException { final String namespace) throws IOException {
return getQuotas(conf, getUserRowKey(user), getSettingsQualifierForUserNamespace(namespace)); return getQuotas(connection, getUserRowKey(user),
getSettingsQualifierForUserNamespace(namespace));
} }
private static Quotas getQuotas(final Configuration conf, final byte[] rowKey) private static Quotas getQuotas(final Connection connection, final byte[] rowKey)
throws IOException { throws IOException {
return getQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS); return getQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS);
} }
private static Quotas getQuotas(final Configuration conf, final byte[] rowKey, private static Quotas getQuotas(final Connection connection, final byte[] rowKey,
final byte[] qualifier) throws IOException { final byte[] qualifier) throws IOException {
Get get = new Get(rowKey); Get get = new Get(rowKey);
get.addColumn(QUOTA_FAMILY_INFO, qualifier); get.addColumn(QUOTA_FAMILY_INFO, qualifier);
Result result = doGet(conf, get); Result result = doGet(connection, get);
if (result.isEmpty()) { if (result.isEmpty()) {
return null; return null;
} }
@ -321,23 +322,17 @@ public class QuotaTableUtil {
/* ========================================================================= /* =========================================================================
* HTable helpers * HTable helpers
*/ */
protected static Result doGet(final Configuration conf, final Get get) protected static Result doGet(final Connection connection, final Get get)
throws IOException { throws IOException {
HTable table = new HTable(conf, QUOTA_TABLE_NAME); try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
try {
return table.get(get); return table.get(get);
} finally {
table.close();
} }
} }
protected static Result[] doGet(final Configuration conf, final List<Get> gets) protected static Result[] doGet(final Connection connection, final List<Get> gets)
throws IOException { throws IOException {
HTable table = new HTable(conf, QUOTA_TABLE_NAME); try (Table table = connection.getTable(QUOTA_TABLE_NAME)) {
try {
return table.get(gets); return table.get(gets);
} finally {
table.close();
} }
} }
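The doGet() rewrites above use the java7 try-with-resources idiom the commit message mentions: the Table is opened from the caller's Connection and closed automatically, even when get() throws. A minimal sketch under the same assumption (the caller owns the Connection; readQuotaRow is an illustrative name, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;

public class QuotaGetExample {
  // Only the Table is opened per invocation; the Connection is long-lived and caller-owned.
  static Result readQuotaRow(Connection connection, TableName quotaTable, byte[] rowKey)
      throws IOException {
    try (Table table = connection.getTable(quotaTable)) {
      return table.get(new Get(rowKey));
    }
  }
}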

View File

@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@ -50,11 +50,7 @@ public class AccessControlClient {
public static final TableName ACL_TABLE_NAME = public static final TableName ACL_TABLE_NAME =
TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl"); TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "acl");
private static HTable getAclTable(Configuration conf) throws IOException { private static BlockingInterface getAccessControlServiceStub(Table ht)
return new HTable(conf, ACL_TABLE_NAME);
}
private static BlockingInterface getAccessControlServiceStub(HTable ht)
throws IOException { throws IOException {
CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW);
BlockingInterface protocol = BlockingInterface protocol =
@ -75,14 +71,12 @@ public class AccessControlClient {
public static void grant(Configuration conf, final TableName tableName, public static void grant(Configuration conf, final TableName tableName,
final String userName, final byte[] family, final byte[] qual, final String userName, final byte[] family, final byte[] qual,
final Permission.Action... actions) throws Throwable { final Permission.Action... actions) throws Throwable {
HTable ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = getAclTable(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, tableName, family, qual, try (Table table = connection.getTable(ACL_TABLE_NAME)) {
ProtobufUtil.grant(getAccessControlServiceStub(table), userName, tableName, family, qual,
actions); actions);
} finally {
if (ht != null) {
ht.close();
} }
} }
} }
@ -97,26 +91,22 @@ public class AccessControlClient {
*/ */
public static void grant(Configuration conf, final String namespace, public static void grant(Configuration conf, final String namespace,
final String userName, final Permission.Action... actions) throws Throwable { final String userName, final Permission.Action... actions) throws Throwable {
HTable ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = getAclTable(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
ProtobufUtil.grant(getAccessControlServiceStub(ht), userName, namespace, actions); try (Table table = connection.getTable(ACL_TABLE_NAME)) {
} finally { ProtobufUtil.grant(getAccessControlServiceStub(table), userName, namespace, actions);
if (ht != null) {
ht.close();
} }
} }
} }
public static boolean isAccessControllerRunning(Configuration conf) public static boolean isAccessControllerRunning(Configuration conf)
throws MasterNotRunningException, ZooKeeperConnectionException, IOException { throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
HBaseAdmin ha = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ha = new HBaseAdmin(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
return ha.isTableAvailable(ACL_TABLE_NAME); try (Admin admin = connection.getAdmin()) {
} finally { return admin.isTableAvailable(ACL_TABLE_NAME);
if (ha != null) {
ha.close();
} }
} }
} }
@ -134,14 +124,12 @@ public class AccessControlClient {
public static void revoke(Configuration conf, final TableName tableName, public static void revoke(Configuration conf, final TableName tableName,
final String username, final byte[] family, final byte[] qualifier, final String username, final byte[] family, final byte[] qualifier,
final Permission.Action... actions) throws Throwable { final Permission.Action... actions) throws Throwable {
HTable ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = getAclTable(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
ProtobufUtil.revoke(getAccessControlServiceStub(ht), username, tableName, family, qualifier, try (Table table = connection.getTable(ACL_TABLE_NAME)) {
actions); ProtobufUtil.revoke(getAccessControlServiceStub(table), username, tableName, family,
} finally { qualifier, actions);
if (ht != null) {
ht.close();
} }
} }
} }
@ -156,13 +144,11 @@ public class AccessControlClient {
*/ */
public static void revoke(Configuration conf, final String namespace, public static void revoke(Configuration conf, final String namespace,
final String userName, final Permission.Action... actions) throws Throwable { final String userName, final Permission.Action... actions) throws Throwable {
HTable ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = getAclTable(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
ProtobufUtil.revoke(getAccessControlServiceStub(ht), userName, namespace, actions); try (Table table = connection.getTable(ACL_TABLE_NAME)) {
} finally { ProtobufUtil.revoke(getAccessControlServiceStub(table), userName, namespace, actions);
if (ht != null) {
ht.close();
} }
} }
} }
@ -177,36 +163,29 @@ public class AccessControlClient {
public static List<UserPermission> getUserPermissions(Configuration conf, String tableRegex) public static List<UserPermission> getUserPermissions(Configuration conf, String tableRegex)
throws Throwable { throws Throwable {
List<UserPermission> permList = new ArrayList<UserPermission>(); List<UserPermission> permList = new ArrayList<UserPermission>();
Table ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
Admin ha = null; // setup each time. This class only used in test and shell at moment though.
try { try (Connection connection = ConnectionFactory.createConnection(conf)) {
ha = new HBaseAdmin(conf); try (Table table = connection.getTable(ACL_TABLE_NAME)) {
ht = new HTable(conf, ACL_TABLE_NAME); try (Admin admin = connection.getAdmin()) {
CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
BlockingInterface protocol = AccessControlProtos.AccessControlService BlockingInterface protocol =
.newBlockingStub(service); AccessControlProtos.AccessControlService.newBlockingStub(service);
HTableDescriptor[] htds = null; HTableDescriptor[] htds = null;
if (tableRegex == null || tableRegex.isEmpty()) { if (tableRegex == null || tableRegex.isEmpty()) {
permList = ProtobufUtil.getUserPermissions(protocol); permList = ProtobufUtil.getUserPermissions(protocol);
} else if (tableRegex.charAt(0) == '@') { } else if (tableRegex.charAt(0) == '@') {
String namespace = tableRegex.substring(1); String namespace = tableRegex.substring(1);
permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace)); permList = ProtobufUtil.getUserPermissions(protocol, Bytes.toBytes(namespace));
} else { } else {
htds = ha.listTables(Pattern.compile(tableRegex)); htds = admin.listTables(Pattern.compile(tableRegex));
for (HTableDescriptor hd : htds) { for (HTableDescriptor hd : htds) {
permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName())); permList.addAll(ProtobufUtil.getUserPermissions(protocol, hd.getTableName()));
} }
} }
} finally {
if (ht != null) {
ht.close();
} }
if (ha != null) {
ha.close();
} }
} }
return permList; return permList;
} }
} }
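The TODOs above note that AccessControlClient still builds a Connection per call because it is only used by tests and the shell. A hedged sketch of the connection-based variant those TODOs point at (grantWithConnection is an illustrative name; the stub setup mirrors getAccessControlServiceStub above):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.security.access.Permission;

public class AclGrantExample {
  // Caller supplies a long-lived Connection; only the acl Table is opened per call.
  public static void grantWithConnection(Connection connection, TableName aclTable,
      String userName, TableName tableName, byte[] family, byte[] qualifier,
      Permission.Action... actions) throws Throwable {
    try (Table table = connection.getTable(aclTable)) {
      // Reach the AccessController coprocessor through the table's RPC channel.
      CoprocessorRpcChannel service = table.coprocessorService(HConstants.EMPTY_START_ROW);
      AccessControlProtos.AccessControlService.BlockingInterface protocol =
          AccessControlProtos.AccessControlService.newBlockingStub(service);
      ProtobufUtil.grant(protocol, userName, tableName, family, qualifier, actions);
    }
  }
}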

View File

@ -26,7 +26,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback; import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
@ -73,16 +74,18 @@ public class VisibilityClient {
*/ */
public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels) public static VisibilityLabelsResponse addLabels(Configuration conf, final String[] labels)
throws Throwable { throws Throwable {
Table ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = new HTable(conf, LABELS_TABLE_NAME); try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable =
new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() { new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
ServerRpcController controller = new ServerRpcController(); ServerRpcController controller = new ServerRpcController();
BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback = BlockingRpcCallback<VisibilityLabelsResponse> rpcCallback =
new BlockingRpcCallback<VisibilityLabelsResponse>(); new BlockingRpcCallback<VisibilityLabelsResponse>();
public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { public VisibilityLabelsResponse call(VisibilityLabelsService service)
throws IOException {
VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder();
for (String label : labels) { for (String label : labels) {
if (label.length() > 0) { if (label.length() > 0) {
@ -99,14 +102,11 @@ public class VisibilityClient {
return response; return response;
} }
}; };
Map<byte[], VisibilityLabelsResponse> result = ht.coprocessorService( Map<byte[], VisibilityLabelsResponse> result =
VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY,
callable); HConstants.EMPTY_BYTE_ARRAY, callable);
return result.values().iterator().next(); // There will be exactly one region for labels return result.values().iterator().next(); // There will be exactly one region for labels
// table and so one entry in result Map. // table and so one entry in result Map.
} finally {
if (ht != null) {
ht.close();
} }
} }
} }
@ -131,9 +131,10 @@ public class VisibilityClient {
* @throws Throwable * @throws Throwable
*/ */
public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable { public static GetAuthsResponse getAuths(Configuration conf, final String user) throws Throwable {
Table ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = new HTable(conf, LABELS_TABLE_NAME); try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable = Batch.Call<VisibilityLabelsService, GetAuthsResponse> callable =
new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() { new Batch.Call<VisibilityLabelsService, GetAuthsResponse>() {
ServerRpcController controller = new ServerRpcController(); ServerRpcController controller = new ServerRpcController();
@ -151,13 +152,11 @@ public class VisibilityClient {
return response; return response;
} }
}; };
Map<byte[], GetAuthsResponse> result = ht.coprocessorService(VisibilityLabelsService.class, Map<byte[], GetAuthsResponse> result =
table.coprocessorService(VisibilityLabelsService.class,
HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable);
return result.values().iterator().next(); // There will be exactly one region for labels return result.values().iterator().next(); // There will be exactly one region for labels
// table and so one entry in result Map. // table and so one entry in result Map.
} finally {
if (ht != null) {
ht.close();
} }
} }
} }
@ -177,9 +176,10 @@ public class VisibilityClient {
private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths, private static VisibilityLabelsResponse setOrClearAuths(Configuration conf, final String[] auths,
final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable { final String user, final boolean setOrClear) throws IOException, ServiceException, Throwable {
Table ht = null; // TODO: Make it so caller passes in a Connection rather than have us do this expensive
try { // setup each time. This class only used in test and shell at moment though.
ht = new HTable(conf, LABELS_TABLE_NAME); try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(LABELS_TABLE_NAME)) {
Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable = Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse> callable =
new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() { new Batch.Call<VisibilityLabelsService, VisibilityLabelsResponse>() {
ServerRpcController controller = new ServerRpcController(); ServerRpcController controller = new ServerRpcController();
@ -206,14 +206,11 @@ public class VisibilityClient {
return response; return response;
} }
}; };
Map<byte[], VisibilityLabelsResponse> result = ht.coprocessorService( Map<byte[], VisibilityLabelsResponse> result = table.coprocessorService(
VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY,
callable); callable);
return result.values().iterator().next(); // There will be exactly one region for labels return result.values().iterator().next(); // There will be exactly one region for labels
// table and so one entry in result Map. // table and so one entry in result Map.
} finally {
if (ht != null) {
ht.close();
} }
} }
} }
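VisibilityClient keeps its Configuration-based entry points, so every call still sets up and tears down its own Connection internally. A minimal caller-side sketch (the label names are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse;
import org.apache.hadoop.hbase.security.visibility.VisibilityClient;

public class VisibilityLabelsExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // Batch the labels into one call where possible, since each call is a full
    // Connection setup and teardown.
    VisibilityLabelsResponse response =
        VisibilityClient.addLabels(conf, new String[] { "secret", "confidential" });
    System.out.println(response);
  }
}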

View File

@ -363,12 +363,10 @@ public class LocalHBaseCluster {
* @return Name of master that just went down. * @return Name of master that just went down.
*/ */
public String waitOnMaster(int serverNumber) { public String waitOnMaster(int serverNumber) {
JVMClusterUtil.MasterThread masterThread = JVMClusterUtil.MasterThread masterThread = this.masterThreads.remove(serverNumber);
this.masterThreads.remove(serverNumber);
while (masterThread.isAlive()) { while (masterThread.isAlive()) {
try { try {
LOG.info("Waiting on " + LOG.info("Waiting on " + masterThread.getMaster().getServerName().toString());
masterThread.getMaster().getServerName().toString());
masterThread.join(); masterThread.join();
} catch (InterruptedException e) { } catch (InterruptedException e) {
e.printStackTrace(); e.printStackTrace();

View File

@ -18,9 +18,9 @@
*/ */
package org.apache.hadoop.hbase; package org.apache.hadoop.hbase;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -41,16 +41,12 @@ public interface Server extends Abortable, Stoppable {
ZooKeeperWatcher getZooKeeper(); ZooKeeperWatcher getZooKeeper();
/** /**
* Returns reference to wrapped short-circuit (i.e. local, bypassing RPC layer entirely) * Returns a reference to the server's cluster connection.
* HConnection to this server, which may be used for miscellaneous needs.
* *
* Important note: this method returns reference to connection which is managed * Important note: this method returns a reference to Connection which is managed
* by Server itself, so callers must NOT attempt to close the connection obtained. * by Server itself, so callers must NOT attempt to close the connection obtained.
*
* See {@link org.apache.hadoop.hbase.client.ConnectionUtils#createShortCircuitHConnection}
* for details on short-circuit connections.
*/ */
HConnection getShortCircuitConnection(); ClusterConnection getConnection();
/** /**
* Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator} * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
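Since getShortCircuitConnection() is now getConnection(), internal callers treat it as a plain cluster connection that the Server owns. A minimal sketch of that usage, mirroring the AssignmentManager change further down (regionsOf is an illustrative name; note the connection is never closed by the caller):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;

public class ServerConnectionExample {
  // The Connection belongs to the Server; callers must NOT close it.
  static List<HRegionInfo> regionsOf(Server server, TableName tableName) throws IOException {
    return MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true);
  }
}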

View File

@ -22,7 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured; import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -36,9 +36,9 @@ public class ZKTableArchiveClient extends Configured {
/** Configuration key for the archive node. */ /** Configuration key for the archive node. */
private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive"; private static final String ZOOKEEPER_ZNODE_HFILE_ARCHIVE_KEY = "zookeeper.znode.hfile.archive";
private HConnection connection; private ClusterConnection connection;
public ZKTableArchiveClient(Configuration conf, HConnection connection) { public ZKTableArchiveClient(Configuration conf, ClusterConnection connection) {
super(conf); super(conf);
this.connection = connection; this.connection = connection;
} }

View File

@ -18,10 +18,12 @@
*/ */
package org.apache.hadoop.hbase.client; package org.apache.hadoop.hbase.client;
import com.google.protobuf.Descriptors.MethodDescriptor; import java.io.IOException;
import com.google.protobuf.Message; import java.util.ArrayList;
import com.google.protobuf.Service; import java.util.List;
import com.google.protobuf.ServiceException; import java.util.Map;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
@ -32,11 +34,10 @@ import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.io.MultipleIOException;
import java.io.IOException; import com.google.protobuf.Descriptors.MethodDescriptor;
import java.util.ArrayList; import com.google.protobuf.Message;
import java.util.List; import com.google.protobuf.Service;
import java.util.Map; import com.google.protobuf.ServiceException;
import java.util.concurrent.ExecutorService;
/** /**
* A wrapper for HTable. Can be used to restrict privilege. * A wrapper for HTable. Can be used to restrict privilege.
@ -55,7 +56,7 @@ import java.util.concurrent.ExecutorService;
public class HTableWrapper implements HTableInterface { public class HTableWrapper implements HTableInterface {
private TableName tableName; private TableName tableName;
private HTable table; private final Table table;
private ClusterConnection connection; private ClusterConnection connection;
private final List<HTableInterface> openTables; private final List<HTableInterface> openTables;
@ -73,7 +74,7 @@ public class HTableWrapper implements HTableInterface {
ClusterConnection connection, ExecutorService pool) ClusterConnection connection, ExecutorService pool)
throws IOException { throws IOException {
this.tableName = tableName; this.tableName = tableName;
this.table = new HTable(tableName, connection, pool); this.table = connection.getTable(tableName, pool);
this.connection = connection; this.connection = connection;
this.openTables = openTables; this.openTables = openTables;
this.openTables.add(this); this.openTables.add(this);
@ -114,7 +115,12 @@ public class HTableWrapper implements HTableInterface {
@Deprecated @Deprecated
public Result getRowOrBefore(byte[] row, byte[] family) public Result getRowOrBefore(byte[] row, byte[] family)
throws IOException { throws IOException {
return table.getRowOrBefore(row, family); Scan scan = Scan.createGetClosestRowOrBeforeReverseScan(row);
Result startRowResult = null;
try (ResultScanner resultScanner = this.table.getScanner(scan)) {
startRowResult = resultScanner.next();
}
return startRowResult;
} }
public Result get(Get get) throws IOException { public Result get(Get get) throws IOException {
@ -131,7 +137,14 @@ public class HTableWrapper implements HTableInterface {
@Deprecated @Deprecated
public Boolean[] exists(List<Get> gets) throws IOException { public Boolean[] exists(List<Get> gets) throws IOException {
return table.exists(gets); // Do conversion.
boolean [] exists = table.existsAll(gets);
if (exists == null) return null;
Boolean [] results = new Boolean [exists.length];
for (int i = 0; i < exists.length; i++) {
results[i] = exists[i]? Boolean.TRUE: Boolean.FALSE;
}
return results;
} }
public void put(Put put) throws IOException { public void put(Put put) throws IOException {
@ -296,12 +309,21 @@ public class HTableWrapper implements HTableInterface {
@Override @Override
public void setAutoFlush(boolean autoFlush) { public void setAutoFlush(boolean autoFlush) {
table.setAutoFlush(autoFlush, autoFlush); table.setAutoFlushTo(autoFlush);
} }
@Override @Override
public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) { public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail) {
table.setAutoFlush(autoFlush, clearBufferOnFail); setAutoFlush(autoFlush);
if (!autoFlush && !clearBufferOnFail) {
// We don't support this combination. In HTable, the implementation is this:
//
// this.clearBufferOnFail = autoFlush || clearBufferOnFail
//
// So if autoFlush == false and clearBufferOnFail is false, that is not supported in
// the new Table Interface so just throwing UnsupportedOperationException here.
throw new UnsupportedOperationException("Can't do this via wrapper");
}
} }
@Override @Override
@ -322,7 +344,8 @@ public class HTableWrapper implements HTableInterface {
@Override @Override
public long incrementColumnValue(byte[] row, byte[] family, public long incrementColumnValue(byte[] row, byte[] family,
byte[] qualifier, long amount, boolean writeToWAL) throws IOException { byte[] qualifier, long amount, boolean writeToWAL) throws IOException {
return table.incrementColumnValue(row, family, qualifier, amount, writeToWAL); return table.incrementColumnValue(row, family, qualifier, amount,
writeToWAL? Durability.USE_DEFAULT: Durability.SKIP_WAL);
} }
@Override @Override
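The incrementColumnValue change above maps the old writeToWAL flag onto the Durability enum. A one-line sketch of that mapping, pulled out for clarity (the helper is illustrative only):

import org.apache.hadoop.hbase.client.Durability;

public class DurabilityMappingExample {
  // writeToWAL == true keeps the table/column-family default; false skips the WAL entirely.
  static Durability toDurability(boolean writeToWAL) {
    return writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
  }
}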

View File

@ -22,11 +22,12 @@ import java.io.IOException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -47,26 +48,28 @@ import org.apache.hadoop.mapred.Partitioner;
public class HRegionPartitioner<K2,V2> public class HRegionPartitioner<K2,V2>
implements Partitioner<ImmutableBytesWritable, V2> { implements Partitioner<ImmutableBytesWritable, V2> {
private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class);
private RegionLocator table; // Connection and locator are not cleaned up; they just die when partitioner is done.
private Connection connection;
private RegionLocator locator;
private byte[][] startKeys; private byte[][] startKeys;
public void configure(JobConf job) { public void configure(JobConf job) {
try { try {
this.table = new HTable(HBaseConfiguration.create(job), this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE))); TableName tableName = TableName.valueOf(job.get(TableOutputFormat.OUTPUT_TABLE));
this.locator = this.connection.getRegionLocator(tableName);
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }
try { try {
this.startKeys = this.table.getStartKeys(); this.startKeys = this.locator.getStartKeys();
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }
} }
public int getPartition(ImmutableBytesWritable key, public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) {
V2 value, int numPartitions) {
byte[] region = null; byte[] region = null;
// Only one region return 0 // Only one region return 0
if (this.startKeys.length == 1){ if (this.startKeys.length == 1){
@ -75,7 +78,7 @@ implements Partitioner<ImmutableBytesWritable, V2> {
try { try {
// Not sure if this is cached after a split so we could have problems // Not sure if this is cached after a split so we could have problems
// here if a region splits while mapping // here if a region splits while mapping
region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); region = locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }
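The partitioner now asks a RegionLocator, obtained from its own Connection, for start keys and region locations instead of instantiating an HTable. A standalone sketch of the same lookup, with explicit cleanup since it is not a long-lived MapReduce task ("exampleTable" is a placeholder name):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator = connection.getRegionLocator(TableName.valueOf("exampleTable"))) {
      byte[][] startKeys = locator.getStartKeys();
      System.out.println("Number of regions: " + startKeys.length);
    }
  }
}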

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MutationSerialization; import org.apache.hadoop.hbase.mapreduce.MutationSerialization;
@ -211,7 +212,8 @@ public class TableMapReduceUtil {
MutationSerialization.class.getName(), ResultSerialization.class.getName()); MutationSerialization.class.getName(), ResultSerialization.class.getName());
if (partitioner == HRegionPartitioner.class) { if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class); job.setPartitionerClass(HRegionPartitioner.class);
int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); int regions =
MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) { if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions); job.setNumReduceTasks(regions);
} }
@ -275,9 +277,11 @@ public class TableMapReduceUtil {
* @param job The current job configuration to adjust. * @param job The current job configuration to adjust.
* @throws IOException When retrieving the table details fails. * @throws IOException When retrieving the table details fails.
*/ */
// Used by tests.
public static void limitNumReduceTasks(String table, JobConf job) public static void limitNumReduceTasks(String table, JobConf job)
throws IOException { throws IOException {
int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); int regions =
MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions); job.setNumReduceTasks(regions);
} }
@ -290,9 +294,11 @@ public class TableMapReduceUtil {
* @param job The current job configuration to adjust. * @param job The current job configuration to adjust.
* @throws IOException When retrieving the table details fails. * @throws IOException When retrieving the table details fails.
*/ */
// Used by tests.
public static void limitNumMapTasks(String table, JobConf job) public static void limitNumMapTasks(String table, JobConf job)
throws IOException { throws IOException {
int regions = MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table); int regions =
MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
if (job.getNumMapTasks() > regions) if (job.getNumMapTasks() > regions)
job.setNumMapTasks(regions); job.setNumMapTasks(regions);
} }
@ -307,7 +313,8 @@ public class TableMapReduceUtil {
*/ */
public static void setNumReduceTasks(String table, JobConf job) public static void setNumReduceTasks(String table, JobConf job)
throws IOException { throws IOException {
job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); job.setNumReduceTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job),
TableName.valueOf(table)));
} }
/** /**
@ -320,7 +327,8 @@ public class TableMapReduceUtil {
*/ */
public static void setNumMapTasks(String table, JobConf job) public static void setNumMapTasks(String table, JobConf job)
throws IOException { throws IOException {
job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job), table)); job.setNumMapTasks(MetaTableAccessor.getRegionCount(HBaseConfiguration.create(job),
TableName.valueOf(table)));
} }
/** /**
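All of the task-count helpers above now go through the TableName-based MetaTableAccessor.getRegionCount overload rather than the String one. A small sketch of that call, factored out (regionCount is an illustrative helper):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;

public class RegionCountExample {
  static int regionCount(Configuration jobConf, String table) throws IOException {
    return MetaTableAccessor.getRegionCount(HBaseConfiguration.create(jobConf),
        TableName.valueOf(table));
  }
}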

View File

@ -20,23 +20,19 @@ package org.apache.hadoop.hbase.mapred;
import java.io.IOException; import java.io.IOException;
import org.apache.commons.logging.Log; import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.InvalidJobConfException; import org.apache.hadoop.mapred.InvalidJobConfException;
import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
@ -50,48 +46,44 @@ public class TableOutputFormat extends FileOutputFormat<ImmutableBytesWritable,
/** JobConf parameter that specifies the output table */ /** JobConf parameter that specifies the output table */
public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; public static final String OUTPUT_TABLE = "hbase.mapred.outputtable";
private static final Log LOG = LogFactory.getLog(TableOutputFormat.class);
/** /**
* Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable) * Convert Reduce output (key, value) to (HStoreKey, KeyedDataArrayWritable)
* and write to an HBase table. * and write to an HBase table.
*/ */
protected static class TableRecordWriter implements RecordWriter<ImmutableBytesWritable, Put> { protected static class TableRecordWriter implements RecordWriter<ImmutableBytesWritable, Put> {
private final Connection conn; private Table m_table;
private final Table table;
/** /**
* Instantiate a TableRecordWriter with the HBase HClient for writing. Assumes control over the * Instantiate a TableRecordWriter with the HBase HClient for writing. Assumes control over the
* lifecycle of {@code conn}. * lifecycle of {@code conn}.
*/ */
public TableRecordWriter(Connection conn, TableName tableName) throws IOException { public TableRecordWriter(final Table table) throws IOException {
this.conn = conn; this.m_table = table;
this.table = conn.getTable(tableName);
((HTable) this.table).setAutoFlush(false, true);
} }
public void close(Reporter reporter) throws IOException { public void close(Reporter reporter) throws IOException {
table.close(); this.m_table.close();
conn.close();
} }
public void write(ImmutableBytesWritable key, Put value) throws IOException { public void write(ImmutableBytesWritable key, Put value) throws IOException {
table.put(new Put(value)); m_table.put(new Put(value));
} }
} }
@Override @Override
public RecordWriter<ImmutableBytesWritable, Put> getRecordWriter(FileSystem ignored, JobConf job, public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
String name, Progressable progress) throws IOException { Progressable progress)
throws IOException {
// expecting exactly one path
TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE)); TableName tableName = TableName.valueOf(job.get(OUTPUT_TABLE));
Connection conn = null; Table table = null;
try { // Connection is not closed. Dies with JVM. No possibility for cleanup.
conn = ConnectionFactory.createConnection(HBaseConfiguration.create(job)); Connection connection = ConnectionFactory.createConnection(job);
} catch(IOException e) { table = connection.getTable(tableName);
LOG.error(e); // Clear write buffer on fail is true by default so no need to reset it.
throw e; table.setAutoFlushTo(false);
} return new TableRecordWriter(table);
return new TableRecordWriter(conn, tableName);
} }
@Override @Override
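getRecordWriter() above deliberately leaves the Connection open ("dies with JVM"), handing the writer only the Table. If a caller wanted the writer to own both, a hedged variant could look like this (ConnectionOwningTableRecordWriter is hypothetical, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical writer that releases both the Table and the Connection in close(Reporter).
class ConnectionOwningTableRecordWriter implements RecordWriter<ImmutableBytesWritable, Put> {
  private final Connection connection;
  private final Table table;

  ConnectionOwningTableRecordWriter(Connection connection, TableName tableName)
      throws IOException {
    this.connection = connection;
    this.table = connection.getTable(tableName);
  }

  public void write(ImmutableBytesWritable key, Put value) throws IOException {
    table.put(new Put(value)); // deep copy, as in the writer above
  }

  public void close(Reporter reporter) throws IOException {
    table.close();
    connection.close();
  }
}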

View File

@ -28,11 +28,12 @@ import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
@ -66,10 +67,13 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression
@Override @Override
public void init() { public void init() {
// Reading all the labels and ordinal. // Reading all the labels and ordinal.
// This scan should be done by user with global_admin previliges.. Ensure that it works // This scan should be done by user with global_admin privileges.. Ensure that it works
Table labelsTable = null; Table labelsTable = null;
Connection connection = null;
try { try {
labelsTable = new HTable(conf, LABELS_TABLE_NAME); connection = ConnectionFactory.createConnection(conf);
try {
labelsTable = connection.getTable(LABELS_TABLE_NAME);
} catch (TableNotFoundException e) { } catch (TableNotFoundException e) {
// Just return without doing anything. When the VC is not used we won't have the 'labels' // Just return without doing anything. When the VC is not used we won't have the 'labels'
// table in the cluster. // table in the cluster.
@ -91,19 +95,27 @@ public class DefaultVisibilityExpressionResolver implements VisibilityExpression
labels.put(Bytes.toString(value), Bytes.toInt(row)); labels.put(Bytes.toString(value), Bytes.toInt(row));
} }
} catch (IOException e) { } catch (IOException e) {
LOG.error("Error reading 'labels' table", e); LOG.error("Error scanning 'labels' table", e);
} finally { } finally {
try { if (scanner != null) scanner.close();
if (scanner != null) {
scanner.close();
} }
} catch (IOException ioe) {
LOG.error("Failed reading 'labels' tags", ioe);
return;
} finally { } finally {
if (labelsTable != null) {
try { try {
labelsTable.close(); labelsTable.close();
} catch (IOException e) { } catch (IOException ioe) {
LOG.warn("Error on closing 'labels' table", e); LOG.warn("Error closing 'labels' table", ioe);
} }
} }
if (connection != null)
try {
connection.close();
} catch (IOException ioe) {
LOG.warn("Failed close of temporary connection", ioe);
}
} }
} }
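init() above keeps the nested try/finally structure because it handles TableNotFoundException specially, but the plain read path can be expressed with chained try-with-resources. A sketch of that shape (countLabelRows is illustrative; the real code reads label ordinals rather than counting rows):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class LabelsScanExample {
  // All three resources are closed in reverse order even if the scan throws.
  static int countLabelRows(Configuration conf, TableName labelsTable) throws IOException {
    int count = 0;
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(labelsTable);
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result result : scanner) {
        if (result != null) {
          count++;
        }
      }
    }
    return count;
  }
}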

View File

@ -28,9 +28,11 @@ import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Partitioner; import org.apache.hadoop.mapreduce.Partitioner;
@ -55,7 +57,9 @@ implements Configurable {
private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class); private static final Log LOG = LogFactory.getLog(HRegionPartitioner.class);
private Configuration conf = null; private Configuration conf = null;
private RegionLocator table; // Connection and locator are not cleaned up; they just die when partitioner is done.
private Connection connection;
private RegionLocator locator;
private byte[][] startKeys; private byte[][] startKeys;
/** /**
@ -82,7 +86,7 @@ implements Configurable {
try { try {
// Not sure if this is cached after a split so we could have problems // Not sure if this is cached after a split so we could have problems
// here if a region splits while mapping // here if a region splits while mapping
region = table.getRegionLocation(key.get()).getRegionInfo().getStartKey(); region = this.locator.getRegionLocation(key.get()).getRegionInfo().getStartKey();
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }
@ -123,14 +127,14 @@ implements Configurable {
public void setConf(Configuration configuration) { public void setConf(Configuration configuration) {
this.conf = HBaseConfiguration.create(configuration); this.conf = HBaseConfiguration.create(configuration);
try { try {
TableName tableName = TableName.valueOf(configuration this.connection = ConnectionFactory.createConnection(HBaseConfiguration.create(conf));
.get(TableOutputFormat.OUTPUT_TABLE)); TableName tableName = TableName.valueOf(conf.get(TableOutputFormat.OUTPUT_TABLE));
this.table = new HTable(this.conf, tableName); this.locator = this.connection.getRegionLocator(tableName);
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }
try { try {
this.startKeys = this.table.getStartKeys(); this.startKeys = this.locator.getStartKeys();
} catch (IOException e) { } catch (IOException e) {
LOG.error(e); LOG.error(e);
} }

View File

@ -41,9 +41,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Base64; import org.apache.hadoop.hbase.util.Base64;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -401,8 +404,9 @@ public class ImportTsv extends Configured implements Tool {
*/ */
public static Job createSubmittableJob(Configuration conf, String[] args) public static Job createSubmittableJob(Configuration conf, String[] args)
throws IOException, ClassNotFoundException { throws IOException, ClassNotFoundException {
Job job = null;
HBaseAdmin admin = new HBaseAdmin(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Admin admin = connection.getAdmin()) {
// Support non-XML supported characters // Support non-XML supported characters
// by re-encoding the passed separator as a Base64 string. // by re-encoding the passed separator as a Base64 string.
String actualSeparator = conf.get(SEPARATOR_CONF_KEY); String actualSeparator = conf.get(SEPARATOR_CONF_KEY);
@ -413,13 +417,13 @@ public class ImportTsv extends Configured implements Tool {
// See if a non-default Mapper was set // See if a non-default Mapper was set
String mapperClassName = conf.get(MAPPER_CONF_KEY); String mapperClassName = conf.get(MAPPER_CONF_KEY);
Class mapperClass = mapperClassName != null ? Class mapperClass =
Class.forName(mapperClassName) : DEFAULT_MAPPER; mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER;
TableName tableName = TableName.valueOf(args[0]); TableName tableName = TableName.valueOf(args[0]);
Path inputDir = new Path(args[1]); Path inputDir = new Path(args[1]);
String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString());
Job job = Job.getInstance(conf, jobName); job = Job.getInstance(conf, jobName);
job.setJarByClass(mapperClass); job.setJarByClass(mapperClass);
FileInputFormat.setInputPaths(job, inputDir); FileInputFormat.setInputPaths(job, inputDir);
job.setInputFormatClass(TextInputFormat.class); job.setInputFormatClass(TextInputFormat.class);
@ -445,7 +449,7 @@ public class ImportTsv extends Configured implements Tool {
throw new TableNotFoundException(errorMsg); throw new TableNotFoundException(errorMsg);
} }
} }
HTable table = new HTable(conf, tableName); try (HTable table = (HTable)connection.getTable(tableName)) {
job.setReducerClass(PutSortReducer.class); job.setReducerClass(PutSortReducer.class);
Path outputDir = new Path(hfileOutPath); Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir); FileOutputFormat.setOutputPath(job, outputDir);
@ -458,6 +462,7 @@ public class ImportTsv extends Configured implements Tool {
job.setCombinerClass(PutCombiner.class); job.setCombinerClass(PutCombiner.class);
} }
HFileOutputFormat.configureIncrementalLoad(job, table); HFileOutputFormat.configureIncrementalLoad(job, table);
}
} else { } else {
if (!admin.tableExists(tableName)) { if (!admin.tableExists(tableName)) {
String errorMsg = format("Table '%s' does not exist.", tableName); String errorMsg = format("Table '%s' does not exist.", tableName);
@ -481,6 +486,8 @@ public class ImportTsv extends Configured implements Tool {
TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.addDependencyJars(job.getConfiguration(), TableMapReduceUtil.addDependencyJars(job.getConfiguration(),
com.google.common.base.Function.class /* Guava used by TsvParser */); com.google.common.base.Function.class /* Guava used by TsvParser */);
}
}
return job; return job;
} }
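createSubmittableJob() now does its admin and table lookups inside try-with-resources blocks on a single Connection. A minimal caller-side sketch of submitting the job it returns (table name and input path are placeholders; the usual importtsv.* column-mapping settings would be applied to conf first):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.mapreduce.Job;

public class ImportTsvSubmitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // args[0] is the target table, args[1] the input directory, as parsed above.
    Job job = ImportTsv.createSubmittableJob(conf,
        new String[] { "exampleTable", "/tmp/tsv-input" });
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}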

View File

@ -252,11 +252,12 @@ implements Configurable {
protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException { protected Pair<byte[][], byte[][]> getStartEndKeys() throws IOException {
if (conf.get(SPLIT_TABLE) != null) { if (conf.get(SPLIT_TABLE) != null) {
TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE)); TableName splitTableName = TableName.valueOf(conf.get(SPLIT_TABLE));
try (Connection conn = ConnectionFactory.createConnection(getConf()); try (Connection conn = ConnectionFactory.createConnection(getConf())) {
RegionLocator rl = conn.getRegionLocator(splitTableName)) { try (RegionLocator rl = conn.getRegionLocator(splitTableName)) {
return rl.getStartEndKeys(); return rl.getStartEndKeys();
} }
} }
}
return super.getStartEndKeys(); return super.getStartEndKeys();
} }

View File

@ -34,8 +34,6 @@ import java.util.zip.ZipFile;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -43,10 +41,11 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.User;
@ -662,7 +661,7 @@ public class TableMapReduceUtil {
job.setOutputValueClass(Writable.class); job.setOutputValueClass(Writable.class);
if (partitioner == HRegionPartitioner.class) { if (partitioner == HRegionPartitioner.class) {
job.setPartitionerClass(HRegionPartitioner.class); job.setPartitionerClass(HRegionPartitioner.class);
int regions = MetaTableAccessor.getRegionCount(conf, table); int regions = MetaTableAccessor.getRegionCount(conf, TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) { if (job.getNumReduceTasks() > regions) {
job.setNumReduceTasks(regions); job.setNumReduceTasks(regions);
} }
@ -687,7 +686,8 @@ public class TableMapReduceUtil {
*/ */
public static void limitNumReduceTasks(String table, Job job) public static void limitNumReduceTasks(String table, Job job)
throws IOException { throws IOException {
int regions = MetaTableAccessor.getRegionCount(job.getConfiguration(), table); int regions =
MetaTableAccessor.getRegionCount(job.getConfiguration(), TableName.valueOf(table));
if (job.getNumReduceTasks() > regions) if (job.getNumReduceTasks() > regions)
job.setNumReduceTasks(regions); job.setNumReduceTasks(regions);
} }
@ -702,7 +702,8 @@ public class TableMapReduceUtil {
*/ */
public static void setNumReduceTasks(String table, Job job) public static void setNumReduceTasks(String table, Job job)
throws IOException { throws IOException {
job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(), table)); job.setNumReduceTasks(MetaTableAccessor.getRegionCount(job.getConfiguration(),
TableName.valueOf(table)));
} }
/** /**

View File

@ -47,7 +47,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
* Convert Map/Reduce output and write it to an HBase table. The KEY is ignored * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored
* while the output value <u>must</u> be either a {@link Put} or a * while the output value <u>must</u> be either a {@link Put} or a
* {@link Delete} instance. * {@link Delete} instance.
*
*/ */
@InterfaceAudience.Public @InterfaceAudience.Public
@InterfaceStability.Stable @InterfaceStability.Stable

View File

@ -339,7 +339,7 @@ public class AssignmentManager {
if (TableName.META_TABLE_NAME.equals(tableName)) { if (TableName.META_TABLE_NAME.equals(tableName)) {
hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper()); hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper());
} else { } else {
hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName, true); hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName, true);
} }
Integer pending = 0; Integer pending = 0;
@ -565,7 +565,7 @@ public class AssignmentManager {
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region)); ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
} }
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes,
this.server.getShortCircuitConnection()); this.server.getConnection());
} }
/** /**
@ -1564,7 +1564,7 @@ public class AssignmentManager {
TableState.State.ENABLING); TableState.State.ENABLING);
// Region assignment from META // Region assignment from META
List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection()); List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getConnection());
// Get any new but slow to checkin region server that joined the cluster // Get any new but slow to checkin region server that joined the cluster
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet(); Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
// Set of offline servers to be returned // Set of offline servers to be returned

View File

@ -29,18 +29,19 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.MetaScanner; import org.apache.hadoop.hbase.client.MetaScanner;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor; import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
@ -62,6 +63,7 @@ public class CatalogJanitor extends Chore {
private final MasterServices services; private final MasterServices services;
private AtomicBoolean enabled = new AtomicBoolean(true); private AtomicBoolean enabled = new AtomicBoolean(true);
private AtomicBoolean alreadyRunning = new AtomicBoolean(false); private AtomicBoolean alreadyRunning = new AtomicBoolean(false);
private final Connection connection;
CatalogJanitor(final Server server, final MasterServices services) { CatalogJanitor(final Server server, final MasterServices services) {
super("CatalogJanitor-" + server.getServerName().toShortString(), super("CatalogJanitor-" + server.getServerName().toShortString(),
@ -69,6 +71,7 @@ public class CatalogJanitor extends Chore {
server); server);
this.server = server; this.server = server;
this.services = services; this.services = services;
this.connection = server.getConnection();
} }
@Override @Override
@ -163,7 +166,7 @@ public class CatalogJanitor extends Chore {
// Run full scan of hbase:meta catalog table passing in our custom visitor with // Run full scan of hbase:meta catalog table passing in our custom visitor with
// the start row // the start row
MetaScanner.metaScan(server.getConfiguration(), null, visitor, tableName); MetaScanner.metaScan(server.getConfiguration(), this.connection, visitor, tableName);
return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>( return new Triple<Integer, Map<HRegionInfo, Result>, Map<HRegionInfo, Result>>(
count.get(), mergedRegions, splitParents); count.get(), mergedRegions, splitParents);
@ -198,7 +201,7 @@ public class CatalogJanitor extends Chore {
+ " from fs because merged region no longer holds references"); + " from fs because merged region no longer holds references");
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB);
MetaTableAccessor.deleteMergeQualifiers(server.getShortCircuitConnection(), MetaTableAccessor.deleteMergeQualifiers(server.getConnection(),
mergedRegion); mergedRegion);
return true; return true;
} }
@ -331,7 +334,7 @@ public class CatalogJanitor extends Chore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent); if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent);
HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent); HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent);
MetaTableAccessor.deleteRegion(this.server.getShortCircuitConnection(), parent); MetaTableAccessor.deleteRegion(this.connection, parent);
result = true; result = true;
} }
return result; return result;
@ -404,7 +407,7 @@ public class CatalogJanitor extends Chore {
// Get merge regions if it is a merged region and already has merge // Get merge regions if it is a merged region and already has merge
// qualifier // qualifier
Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
.getRegionsFromMergeQualifier(this.services.getShortCircuitConnection(), .getRegionsFromMergeQualifier(this.services.getConnection(),
region.getRegionName()); region.getRegionName());
if (mergeRegions == null if (mergeRegions == null
|| (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) { || (mergeRegions.getFirst() == null && mergeRegions.getSecond() == null)) {
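CatalogJanitor now caches the server's Connection in a field at construction and reuses it on every chore run rather than standing up table access from a Configuration each time. The same shape in a standalone sketch; the class and method names are made up, only the cache-the-Connection idea is from the patch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.Server;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaSweepSketch {
      private final Connection connection;

      public MetaSweepSketch(Server server) {
        this.connection = server.getConnection(); // cached once; lifetime owned by the server
      }

      void scanOnce() throws IOException {
        // Per run: a cheap Table handle off the long-lived Connection, closed when done.
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(new Scan())) {
          for (Result r : scanner) {
            // inspect catalog rows here
          }
        }
      }
    }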
@ -538,12 +538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     ZKClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId());
     this.serverManager = createServerManager(this, this);
-    synchronized (this) {
-      if (shortCircuitConnection == null) {
-        shortCircuitConnection = createShortCircuitConnection();
-        metaTableLocator = new MetaTableLocator();
-      }
-    }
+    setupClusterConnection();
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
@ -721,7 +716,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
metaState.getState(), metaState.getServerName(), null); metaState.getState(), metaState.getServerName(), null);
if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation( if (!metaState.isOpened() || !metaTableLocator.verifyMetaRegionLocation(
this.getShortCircuitConnection(), this.getZooKeeper(), timeout)) { this.getConnection(), this.getZooKeeper(), timeout)) {
ServerName currentMetaServer = metaState.getServerName(); ServerName currentMetaServer = metaState.getServerName();
if (serverManager.isServerOnline(currentMetaServer)) { if (serverManager.isServerOnline(currentMetaServer)) {
LOG.info("Meta was in transition on " + currentMetaServer); LOG.info("Meta was in transition on " + currentMetaServer);
@ -1492,6 +1487,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
* is found, but not currently deployed, the second element of the pair * is found, but not currently deployed, the second element of the pair
* may be null. * may be null.
*/ */
@VisibleForTesting // Used by TestMaster.
Pair<HRegionInfo, ServerName> getTableRegionForRow( Pair<HRegionInfo, ServerName> getTableRegionForRow(
final TableName tableName, final byte [] rowKey) final TableName tableName, final byte [] rowKey)
throws IOException { throws IOException {
@ -1542,7 +1538,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
if (isCatalogTable(tableName)) { if (isCatalogTable(tableName)) {
throw new IOException("Can't modify catalog tables"); throw new IOException("Can't modify catalog tables");
} }
if (!MetaTableAccessor.tableExists(getShortCircuitConnection(), tableName)) { if (!MetaTableAccessor.tableExists(getConnection(), tableName)) {
throw new TableNotFoundException(tableName); throw new TableNotFoundException(tableName);
} }
if (!getAssignmentManager().getTableStateManager(). if (!getAssignmentManager().getTableStateManager().
@ -1132,7 +1132,7 @@ public class MasterRpcServices extends RSRpcServices
try { try {
master.checkInitialized(); master.checkInitialized();
Pair<HRegionInfo, ServerName> pair = Pair<HRegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName); MetaTableAccessor.getRegion(master.getConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName)); if (pair == null) throw new UnknownRegionException(Bytes.toStringBinary(regionName));
HRegionInfo hri = pair.getFirst(); HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) { if (master.cpHost != null) {
@ -1263,7 +1263,7 @@ public class MasterRpcServices extends RSRpcServices
+ " actual: " + type); + " actual: " + type);
} }
Pair<HRegionInfo, ServerName> pair = Pair<HRegionInfo, ServerName> pair =
MetaTableAccessor.getRegion(master.getShortCircuitConnection(), regionName); MetaTableAccessor.getRegion(master.getConnection(), regionName);
if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName)); if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
HRegionInfo hri = pair.getFirst(); HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) { if (master.cpHost != null) {
@ -240,11 +240,11 @@ public class RegionStateStore {
void splitRegion(HRegionInfo p, void splitRegion(HRegionInfo p,
HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
MetaTableAccessor.splitRegion(server.getShortCircuitConnection(), p, a, b, sn); MetaTableAccessor.splitRegion(server.getConnection(), p, a, b, sn);
} }
void mergeRegions(HRegionInfo p, void mergeRegions(HRegionInfo p,
HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException { HRegionInfo a, HRegionInfo b, ServerName sn) throws IOException {
MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(), p, a, b, sn); MetaTableAccessor.mergeRegions(server.getConnection(), p, a, b, sn);
} }
} }
@ -934,7 +934,7 @@ public class RegionStates {
try { try {
Pair<HRegionInfo, ServerName> p = Pair<HRegionInfo, ServerName> p =
MetaTableAccessor.getRegion(server.getShortCircuitConnection(), regionName); MetaTableAccessor.getRegion(server.getConnection(), regionName);
HRegionInfo hri = p == null ? null : p.getFirst(); HRegionInfo hri = p == null ? null : p.getFirst();
if (hri != null) { if (hri != null) {
createRegionState(hri); createRegionState(hri);
@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.ZKNamespaceManager;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
@ -69,7 +68,7 @@ public class TableNamespaceManager {
private Configuration conf; private Configuration conf;
private MasterServices masterServices; private MasterServices masterServices;
private HTable nsTable; private Table nsTable;
private ZKNamespaceManager zkNamespaceManager; private ZKNamespaceManager zkNamespaceManager;
private boolean initialized; private boolean initialized;
@ -82,7 +81,7 @@ public class TableNamespaceManager {
} }
public void start() throws IOException { public void start() throws IOException {
if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(), if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
TableName.NAMESPACE_TABLE_NAME)) { TableName.NAMESPACE_TABLE_NAME)) {
LOG.info("Namespace table not found. Creating..."); LOG.info("Namespace table not found. Creating...");
createNamespaceTable(masterServices); createNamespaceTable(masterServices);
@ -253,16 +252,14 @@ public class TableNamespaceManager {
   public synchronized boolean isTableAvailableAndInitialized() throws IOException {
     // Did we already get a table? If so, still make sure it's available
     if (initialized) {
-      if (nsTable.getConnection().isClosed()) {
-        nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
-      }
+      this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
       return true;
     }
     // Now check if the table is assigned, if not then fail fast
     if (isTableAssigned() && isTableEnabled()) {
       try {
-        nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
+        nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
         zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
         zkNamespaceManager.start();
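The manager keeps nsTable as a long-lived field but now sources it from the master's shared Connection instead of newing up an HTable from a Configuration. The field pattern, reduced to a sketch (class and method names invented):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Table;

    public class CachedTableSketch {
      private final Connection connection; // shared, owned elsewhere
      private Table nsTable;               // light handle, re-obtained as needed

      public CachedTableSketch(Connection connection) {
        this.connection = connection;
      }

      void ensureTable() throws IOException {
        if (nsTable == null) {
          // getTable is cheap; the heavy state (zk, rpc, caches) lives in the Connection.
          nsTable = connection.getTable(TableName.NAMESPACE_TABLE_NAME);
        }
      }
    }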
@ -25,21 +25,21 @@ import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Map.Entry;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
import java.util.Map.Entry;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.RackManager; import org.apache.hadoop.hbase.master.RackManager;
@ -121,12 +121,14 @@ public class FavoredNodeAssignmentHelper {
       }
     }
     // Write the region assignments to the meta table.
-    Table metaTable = null;
-    try {
-      metaTable = new HTable(conf, TableName.META_TABLE_NAME);
-      metaTable.put(puts);
-    } finally {
-      if (metaTable != null) metaTable.close();
-    }
+    // TODO: See above overrides take a Connection rather than a Configuration only the
+    // Connection is a short circuit connection. That is not going to good in all cases, when
+    // master and meta are not colocated. Fix when this favored nodes feature is actually used
+    // someday.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+        metaTable.put(puts);
+      }
+    }
     LOG.info("Added " + puts.size() + " regions in META");
   }
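This is one of the standalone spots the commit message calls out: there is no Server to borrow a Connection from, so the helper builds its own with ConnectionFactory and leans on the java7 try-with-resources idiom instead of explicit finally/close. The idiom in a self-contained sketch; the table, row, and values are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StandaloneWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Both resources are Closeable, so nested try-with-resources replaces
        // the old finally { table.close(); } boilerplate.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example:t"))) {
          Put put = new Put(Bytes.toBytes("row-1"));
          put.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
        }
      }
    }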
@ -304,7 +306,6 @@ public class FavoredNodeAssignmentHelper {
* primary/secondary/tertiary RegionServers * primary/secondary/tertiary RegionServers
* @param primaryRSMap * @param primaryRSMap
* @return the map of regions to the servers the region-files should be hosted on * @return the map of regions to the servers the region-files should be hosted on
* @throws IOException
*/ */
public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions( public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
Map<HRegionInfo, ServerName> primaryRSMap) { Map<HRegionInfo, ServerName> primaryRSMap) {
@ -75,7 +75,7 @@ public class FavoredNodeLoadBalancer extends BaseLoadBalancer {
List<RegionPlan> plans = new ArrayList<RegionPlan>(); List<RegionPlan> plans = new ArrayList<RegionPlan>();
//perform a scan of the meta to get the latest updates (if any) //perform a scan of the meta to get the latest updates (if any)
SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment = SnapshotOfRegionAssignmentFromMeta snaphotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(super.services.getShortCircuitConnection()); new SnapshotOfRegionAssignmentFromMeta(super.services.getConnection());
try { try {
snaphotOfRegionAssignment.initialize(); snaphotOfRegionAssignment.initialize();
} catch (IOException ie) { } catch (IOException ie) {
@ -119,7 +119,7 @@ public class CreateTableHandler extends EventHandler {
boolean success = false; boolean success = false;
try { try {
TableName tableName = this.hTableDescriptor.getTableName(); TableName tableName = this.hTableDescriptor.getTableName();
if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { if (MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
throw new TableExistsException(tableName); throw new TableExistsException(tableName);
} }
success = true; success = true;
@ -289,6 +289,6 @@ public class CreateTableHandler extends EventHandler {
*/ */
protected void addRegionsToMeta(final List<HRegionInfo> regionInfos) protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
throws IOException { throws IOException {
MetaTableAccessor.addRegionsToMeta(this.server.getShortCircuitConnection(), regionInfos); MetaTableAccessor.addRegionsToMeta(this.server.getConnection(), regionInfos);
} }
} }
@ -135,7 +135,7 @@ public class DeleteTableHandler extends TableEventHandler {
try { try {
// 1. Remove regions from META // 1. Remove regions from META
LOG.debug("Deleting regions from META"); LOG.debug("Deleting regions from META");
MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions); MetaTableAccessor.deleteRegions(this.server.getConnection(), regions);
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
// NOTE: At this point we still have data on disk, but nothing in hbase:meta // NOTE: At this point we still have data on disk, but nothing in hbase:meta
@ -80,7 +80,7 @@ public class DisableTableHandler extends EventHandler {
boolean success = false; boolean success = false;
try { try {
// Check if table exists // Check if table exists
if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
throw new TableNotFoundException(tableName); throw new TableNotFoundException(tableName);
} }
@ -91,7 +91,7 @@ public class EnableTableHandler extends EventHandler {
boolean success = false; boolean success = false;
try { try {
// Check if table exists // Check if table exists
if (!MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) { if (!MetaTableAccessor.tableExists(this.server.getConnection(), tableName)) {
// retainAssignment is true only during recovery. In normal case it is false // retainAssignment is true only during recovery. In normal case it is false
if (!this.skipTableStateCheck) { if (!this.skipTableStateCheck) {
throw new TableNotFoundException(tableName); throw new TableNotFoundException(tableName);
@ -177,7 +177,7 @@ public class EnableTableHandler extends EventHandler {
server.getZooKeeper()); server.getZooKeeper());
} else { } else {
tableRegionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( tableRegionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
server.getShortCircuitConnection(), tableName, true); server.getConnection(), tableName, true);
} }
int countOfRegionsInTable = tableRegionsAndLocations.size(); int countOfRegionsInTable = tableRegionsAndLocations.size();
@ -148,7 +148,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
throws InterruptedException, IOException, KeeperException { throws InterruptedException, IOException, KeeperException {
long timeout = this.server.getConfiguration(). long timeout = this.server.getConfiguration().
getLong("hbase.catalog.verification.timeout", 1000); getLong("hbase.catalog.verification.timeout", 1000);
if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getShortCircuitConnection(), if (!server.getMetaTableLocator().verifyMetaRegionLocation(server.getConnection(),
this.server.getZooKeeper(), timeout)) { this.server.getZooKeeper(), timeout)) {
this.services.getAssignmentManager().assignMeta(); this.services.getAssignmentManager().assignMeta();
} else if (serverName.equals(server.getMetaTableLocator().getMetaRegionLocation( } else if (serverName.equals(server.getMetaTableLocator().getMetaRegionLocation(
@ -25,15 +25,14 @@ import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
@ -44,7 +43,6 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@InterfaceAudience.Private @InterfaceAudience.Private
@ -101,19 +99,14 @@ public class ModifyTableHandler extends TableEventHandler {
     Set<byte[]> tableRows = new HashSet<byte[]>();
     Scan scan = MetaTableAccessor.getScanForTableName(table);
     scan.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    Table htable = null;
-    try {
-      htable = new HTable(masterServices.getConfiguration(), TableName.META_TABLE_NAME);
-      ResultScanner resScanner = htable.getScanner(scan);
+    Connection connection = this.masterServices.getConnection();
+    try (Table metaTable = connection.getTable(TableName.META_TABLE_NAME)) {
+      ResultScanner resScanner = metaTable.getScanner(scan);
       for (Result result : resScanner) {
         tableRows.add(result.getRow());
       }
       MetaTableAccessor.removeRegionReplicasFromMeta(tableRows, newReplicaCount,
-        oldReplicaCount - newReplicaCount, masterServices.getShortCircuitConnection());
-    } finally {
-      if (htable != null) {
-        htable.close();
-      }
+        oldReplicaCount - newReplicaCount, masterServices.getConnection());
     }
   }
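Same conversion here: the meta scan runs against a Table from the master's Connection inside try-with-resources. Below, the same scan reduced to a sketch, with the ResultScanner also put in the try (closing the scanner is an extra shown for illustration, not something this hunk does):

    import java.io.IOException;
    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;

    public class MetaRowCollectSketch {
      // Collect the row keys a Scan over hbase:meta returns; Scan setup is left to the caller.
      static Set<byte[]> collectRows(Connection connection, Scan scan) throws IOException {
        Set<byte[]> rows = new HashSet<byte[]>();
        try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
             ResultScanner scanner = meta.getScanner(scan)) {
          for (Result result : scanner) {
            rows.add(result.getRow());
          }
        }
        return rows;
      }
    }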
@ -28,19 +28,21 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CoordinatedStateException; import org.apache.hadoop.hbase.CoordinatedStateException;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException; import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType; import org.apache.hadoop.hbase.executor.EventType;
@ -48,10 +50,10 @@ import org.apache.hadoop.hbase.master.BulkReOpen;
import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.TableLockManager.TableLock; import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
/** /**
* Base class for performing operations against tables. * Base class for performing operations against tables.
@ -130,7 +132,7 @@ public abstract class TableEventHandler extends EventHandler {
if (TableName.META_TABLE_NAME.equals(tableName)) { if (TableName.META_TABLE_NAME.equals(tableName)) {
hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper()); hris = new MetaTableLocator().getMetaRegions(server.getZooKeeper());
} else { } else {
hris = MetaTableAccessor.getTableRegions(server.getShortCircuitConnection(), tableName); hris = MetaTableAccessor.getTableRegions(server.getConnection(), tableName);
} }
handleTableOperation(hris); handleTableOperation(hris);
if (eventType.isOnlineSchemaChangeSupported() && this.masterServices. if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
@ -175,32 +177,32 @@ public abstract class TableEventHandler extends EventHandler {
   public boolean reOpenAllRegions(List<HRegionInfo> regions) throws IOException {
     boolean done = false;
     LOG.info("Bucketing regions by region server...");
-    HTable table = new HTable(masterServices.getConfiguration(), tableName);
-    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps
-      .newTreeMap();
-    NavigableMap<HRegionInfo, ServerName> hriHserverMapping;
-    try {
-      hriHserverMapping = table.getRegionLocations();
-    } finally {
-      table.close();
+    List<HRegionLocation> regionLocations = null;
+    Connection connection = this.masterServices.getConnection();
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      regionLocations = locator.getAllRegionLocations();
     }
+    // Convert List<HRegionLocation> to Map<HRegionInfo, ServerName>.
+    NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
+    for (HRegionLocation location: regionLocations) {
+      hri2Sn.put(location.getRegionInfo(), location.getServerName());
+    }
+    TreeMap<ServerName, List<HRegionInfo>> serverToRegions = Maps.newTreeMap();
     List<HRegionInfo> reRegions = new ArrayList<HRegionInfo>();
     for (HRegionInfo hri : regions) {
-      ServerName rsLocation = hriHserverMapping.get(hri);
+      ServerName sn = hri2Sn.get(hri);
       // Skip the offlined split parent region
       // See HBASE-4578 for more information.
-      if (null == rsLocation) {
+      if (null == sn) {
         LOG.info("Skip " + hri);
         continue;
       }
-      if (!serverToRegions.containsKey(rsLocation)) {
+      if (!serverToRegions.containsKey(sn)) {
         LinkedList<HRegionInfo> hriList = Lists.newLinkedList();
-        serverToRegions.put(rsLocation, hriList);
+        serverToRegions.put(sn, hriList);
       }
       reRegions.add(hri);
-      serverToRegions.get(rsLocation).add(hri);
+      serverToRegions.get(sn).add(hri);
     }
     LOG.info("Reopening " + reRegions.size() + " regions on "
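reOpenAllRegions switches from HTable#getRegionLocations to a RegionLocator and then folds the locations into a region-to-server map. That RegionLocator usage, isolated (the wrapper method is invented; the calls mirror the hunk):

    import java.io.IOException;
    import java.util.List;
    import java.util.NavigableMap;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationSketch {
      // Build an HRegionInfo -> ServerName map for a table, as the handler now does.
      static NavigableMap<HRegionInfo, ServerName> mapRegions(Connection connection,
          TableName table) throws IOException {
        List<HRegionLocation> locations;
        try (RegionLocator locator = connection.getRegionLocator(table)) {
          locations = locator.getAllRegionLocations();
        }
        NavigableMap<HRegionInfo, ServerName> hri2Sn = new TreeMap<HRegionInfo, ServerName>();
        for (HRegionLocation location : locations) {
          hri2Sn.put(location.getRegionInfo(), location.getServerName());
        }
        return hri2Sn;
      }
    }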
@ -125,7 +125,7 @@ public class TruncateTableHandler extends DeleteTableHandler {
} }
// 4. Add regions to META // 4. Add regions to META
MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(), MetaTableAccessor.addRegionsToMeta(masterServices.getConnection(),
regionInfos); regionInfos);
// 5. Trigger immediate assignment of the regions in round-robin fashion // 5. Trigger immediate assignment of the regions in round-robin fashion
@ -141,7 +141,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
protected void addRegionsToMeta(final List<HRegionInfo> regionInfos) protected void addRegionsToMeta(final List<HRegionInfo> regionInfos)
throws IOException { throws IOException {
super.addRegionsToMeta(regionInfos); super.addRegionsToMeta(regionInfos);
metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), regionInfos); metaChanges.updateMetaParentRegions(this.server.getConnection(), regionInfos);
} }
@Override @Override
@ -155,7 +155,7 @@ public final class MasterSnapshotVerifier {
if (TableName.META_TABLE_NAME.equals(tableName)) { if (TableName.META_TABLE_NAME.equals(tableName)) {
regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper()); regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper());
} else { } else {
regions = MetaTableAccessor.getTableRegions(services.getShortCircuitConnection(), tableName); regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName);
} }
// Remove the non-default regions // Remove the non-default regions
RegionReplicaUtil.removeNonDefaultRegions(regions); RegionReplicaUtil.removeNonDefaultRegions(regions);
@ -109,7 +109,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
@Override @Override
protected void handleTableOperation(List<HRegionInfo> hris) throws IOException { protected void handleTableOperation(List<HRegionInfo> hris) throws IOException {
MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem(); MasterFileSystem fileSystemManager = masterServices.getMasterFileSystem();
Connection conn = masterServices.getShortCircuitConnection(); Connection conn = masterServices.getConnection();
FileSystem fs = fileSystemManager.getFileSystem(); FileSystem fs = fileSystemManager.getFileSystem();
Path rootDir = fileSystemManager.getRootDir(); Path rootDir = fileSystemManager.getRootDir();
TableName tableName = hTableDescriptor.getTableName(); TableName tableName = hTableDescriptor.getTableName();
@ -163,7 +163,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
if (metaChanges.hasRegionsToRestore()) { if (metaChanges.hasRegionsToRestore()) {
MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore()); MetaTableAccessor.overwriteRegions(conn, metaChanges.getRegionsToRestore());
} }
metaChanges.updateMetaParentRegions(this.server.getShortCircuitConnection(), hris); metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);
// At this point the restore is complete. Next step is enabling the table. // At this point the restore is complete. Next step is enabling the table.
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) + LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
@ -721,7 +721,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest); SnapshotReferenceUtil.verifySnapshot(master.getConfiguration(), fs, manifest);
// Execute the restore/clone operation // Execute the restore/clone operation
if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) { if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
if (master.getTableStateManager().isTableState( if (master.getTableStateManager().isTableState(
TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) { TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
throw new UnsupportedOperationException("Table '" + throw new UnsupportedOperationException("Table '" +
@ -174,7 +174,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
server.getZooKeeper()); server.getZooKeeper());
} else { } else {
regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
server.getShortCircuitConnection(), snapshotTable, false); server.getConnection(), snapshotTable, false);
} }
// run the snapshot // run the snapshot
@ -132,7 +132,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
master.getZooKeeper()); master.getZooKeeper());
} else { } else {
regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations( regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tableName, false); master.getConnection(), tableName, false);
} }
} catch (InterruptedException e1) { } catch (InterruptedException e1) {
String msg = "Failed to get regions for '" + desc.getInstance() + "'"; String msg = "Failed to get regions for '" + desc.getInstance() + "'";
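Here, as in the snapshot and enable-table handlers above, getTableRegionsAndLocations is handed the server's Connection directly. A sketch of consuming its result; the List<Pair<HRegionInfo, ServerName>> return type is recalled from this branch rather than shown in the excerpt, so treat it as an assumption:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.MetaTableAccessor;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.util.Pair;

    public class RegionsAndLocationsSketch {
      // Count a table's regions from meta (assumed return type: List<Pair<HRegionInfo, ServerName>>).
      static int countRegions(Connection connection, TableName tableName)
          throws IOException, InterruptedException {
        List<Pair<HRegionInfo, ServerName>> regionsAndLocations =
            MetaTableAccessor.getTableRegionsAndLocations(connection, tableName, false);
        return regionsAndLocations.size();
      }
    }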
@ -23,31 +23,21 @@ import java.util.HashSet;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.master.MasterServices; import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.handler.CreateTableHandler; import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse; import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleRequest;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleType; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.QuotaScope;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TimeUnit;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
/** /**
* Master Quota Manager. * Master Quota Manager.
@ -80,7 +70,7 @@ public class MasterQuotaManager {
} }
// Create the quota table if missing // Create the quota table if missing
if (!MetaTableAccessor.tableExists(masterServices.getShortCircuitConnection(), if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
QuotaUtil.QUOTA_TABLE_NAME)) { QuotaUtil.QUOTA_TABLE_NAME)) {
LOG.info("Quota table not found. Creating..."); LOG.info("Quota table not found. Creating...");
createQuotaTable(); createQuotaTable();
@ -101,10 +91,6 @@ public class MasterQuotaManager {
return enabled; return enabled;
} }
private Configuration getConfiguration() {
return masterServices.getConfiguration();
}
/* ========================================================================== /* ==========================================================================
* Admin operations to manage the quota table * Admin operations to manage the quota table
*/ */
@ -152,15 +138,15 @@ public class MasterQuotaManager {
setQuota(req, new SetQuotaOperations() { setQuota(req, new SetQuotaOperations() {
@Override @Override
public Quotas fetch() throws IOException { public Quotas fetch() throws IOException {
return QuotaUtil.getUserQuota(getConfiguration(), userName); return QuotaUtil.getUserQuota(masterServices.getConnection(), userName);
} }
@Override @Override
public void update(final Quotas quotas) throws IOException { public void update(final Quotas quotas) throws IOException {
QuotaUtil.addUserQuota(getConfiguration(), userName, quotas); QuotaUtil.addUserQuota(masterServices.getConnection(), userName, quotas);
} }
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName); QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName);
} }
@Override @Override
public void preApply(final Quotas quotas) throws IOException { public void preApply(final Quotas quotas) throws IOException {
@ -178,15 +164,15 @@ public class MasterQuotaManager {
setQuota(req, new SetQuotaOperations() { setQuota(req, new SetQuotaOperations() {
@Override @Override
public Quotas fetch() throws IOException { public Quotas fetch() throws IOException {
return QuotaUtil.getUserQuota(getConfiguration(), userName, table); return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, table);
} }
@Override @Override
public void update(final Quotas quotas) throws IOException { public void update(final Quotas quotas) throws IOException {
QuotaUtil.addUserQuota(getConfiguration(), userName, table, quotas); QuotaUtil.addUserQuota(masterServices.getConnection(), userName, table, quotas);
} }
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, table); QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, table);
} }
@Override @Override
public void preApply(final Quotas quotas) throws IOException { public void preApply(final Quotas quotas) throws IOException {
@ -204,15 +190,15 @@ public class MasterQuotaManager {
setQuota(req, new SetQuotaOperations() { setQuota(req, new SetQuotaOperations() {
@Override @Override
public Quotas fetch() throws IOException { public Quotas fetch() throws IOException {
return QuotaUtil.getUserQuota(getConfiguration(), userName, namespace); return QuotaUtil.getUserQuota(masterServices.getConnection(), userName, namespace);
} }
@Override @Override
public void update(final Quotas quotas) throws IOException { public void update(final Quotas quotas) throws IOException {
QuotaUtil.addUserQuota(getConfiguration(), userName, namespace, quotas); QuotaUtil.addUserQuota(masterServices.getConnection(), userName, namespace, quotas);
} }
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
QuotaUtil.deleteUserQuota(masterServices.getConfiguration(), userName, namespace); QuotaUtil.deleteUserQuota(masterServices.getConnection(), userName, namespace);
} }
@Override @Override
public void preApply(final Quotas quotas) throws IOException { public void preApply(final Quotas quotas) throws IOException {
@ -230,15 +216,15 @@ public class MasterQuotaManager {
setQuota(req, new SetQuotaOperations() { setQuota(req, new SetQuotaOperations() {
@Override @Override
public Quotas fetch() throws IOException { public Quotas fetch() throws IOException {
return QuotaUtil.getTableQuota(getConfiguration(), table); return QuotaUtil.getTableQuota(masterServices.getConnection(), table);
} }
@Override @Override
public void update(final Quotas quotas) throws IOException { public void update(final Quotas quotas) throws IOException {
QuotaUtil.addTableQuota(getConfiguration(), table, quotas); QuotaUtil.addTableQuota(masterServices.getConnection(), table, quotas);
} }
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
QuotaUtil.deleteTableQuota(getConfiguration(), table); QuotaUtil.deleteTableQuota(masterServices.getConnection(), table);
} }
@Override @Override
public void preApply(final Quotas quotas) throws IOException { public void preApply(final Quotas quotas) throws IOException {
@ -256,15 +242,15 @@ public class MasterQuotaManager {
setQuota(req, new SetQuotaOperations() { setQuota(req, new SetQuotaOperations() {
@Override @Override
public Quotas fetch() throws IOException { public Quotas fetch() throws IOException {
return QuotaUtil.getNamespaceQuota(getConfiguration(), namespace); return QuotaUtil.getNamespaceQuota(masterServices.getConnection(), namespace);
} }
@Override @Override
public void update(final Quotas quotas) throws IOException { public void update(final Quotas quotas) throws IOException {
QuotaUtil.addNamespaceQuota(getConfiguration(), namespace, quotas); QuotaUtil.addNamespaceQuota(masterServices.getConnection(), namespace, quotas);
} }
@Override @Override
public void delete() throws IOException { public void delete() throws IOException {
QuotaUtil.deleteNamespaceQuota(getConfiguration(), namespace); QuotaUtil.deleteNamespaceQuota(masterServices.getConnection(), namespace);
} }
@Override @Override
public void preApply(final Quotas quotas) throws IOException { public void preApply(final Quotas quotas) throws IOException {
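Every SetQuotaOperations callback now feeds masterServices.getConnection() straight into QuotaUtil, whose helpers (rewritten further down in this commit) take a Connection as their first argument. Driving those helpers directly looks roughly like this; the user name is a placeholder and the empty Quotas payload is only there to keep the sketch compilable:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.quotas.QuotaUtil;

    public class QuotaOpsSketch {
      // Write, read back, then remove a user quota through one shared Connection.
      static void roundTrip(Connection connection) throws IOException {
        Quotas quotas = Quotas.newBuilder().build(); // placeholder payload
        QuotaUtil.addUserQuota(connection, "example_user", quotas);
        Quotas readBack = QuotaUtil.getUserQuota(connection, "example_user"); // unused; shows the read path
        QuotaUtil.deleteUserQuota(connection, "example_user");
      }
    }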
@ -23,19 +23,16 @@ import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Chore; import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@ -240,7 +237,7 @@ public class QuotaCache implements Stoppable {
@Override @Override
public Map<String, QuotaState> fetchEntries(final List<Get> gets) public Map<String, QuotaState> fetchEntries(final List<Get> gets)
throws IOException { throws IOException {
return QuotaUtil.fetchNamespaceQuotas(QuotaCache.this.getConfiguration(), gets); return QuotaUtil.fetchNamespaceQuotas(rsServices.getConnection(), gets);
} }
}); });
} }
@ -255,7 +252,7 @@ public class QuotaCache implements Stoppable {
@Override @Override
public Map<TableName, QuotaState> fetchEntries(final List<Get> gets) public Map<TableName, QuotaState> fetchEntries(final List<Get> gets)
throws IOException { throws IOException {
return QuotaUtil.fetchTableQuotas(QuotaCache.this.getConfiguration(), gets); return QuotaUtil.fetchTableQuotas(rsServices.getConnection(), gets);
} }
}); });
} }
@ -272,7 +269,7 @@ public class QuotaCache implements Stoppable {
@Override @Override
public Map<String, UserQuotaState> fetchEntries(final List<Get> gets) public Map<String, UserQuotaState> fetchEntries(final List<Get> gets)
throws IOException { throws IOException {
return QuotaUtil.fetchUserQuotas(QuotaCache.this.getConfiguration(), gets); return QuotaUtil.fetchUserQuotas(rsServices.getConnection(), gets);
} }
}); });
} }
@ -19,15 +19,12 @@
package org.apache.hadoop.hbase.quotas; package org.apache.hadoop.hbase.quotas;
import java.io.IOException; import java.io.IOException;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
@ -35,18 +32,19 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.security.UserGroupInformation;
/** /**
* Helper class to interact with the quota table * Helper class to interact with the quota table
@ -85,90 +83,89 @@ public class QuotaUtil extends QuotaTableUtil {
/* ========================================================================= /* =========================================================================
* Quota "settings" helpers * Quota "settings" helpers
*/ */
public static void addTableQuota(final Configuration conf, final TableName table, public static void addTableQuota(final Connection connection, final TableName table,
final Quotas data) throws IOException { final Quotas data) throws IOException {
addQuotas(conf, getTableRowKey(table), data); addQuotas(connection, getTableRowKey(table), data);
} }
public static void deleteTableQuota(final Configuration conf, final TableName table) public static void deleteTableQuota(final Connection connection, final TableName table)
throws IOException { throws IOException {
deleteQuotas(conf, getTableRowKey(table)); deleteQuotas(connection, getTableRowKey(table));
} }
public static void addNamespaceQuota(final Configuration conf, final String namespace, public static void addNamespaceQuota(final Connection connection, final String namespace,
final Quotas data) throws IOException { final Quotas data) throws IOException {
addQuotas(conf, getNamespaceRowKey(namespace), data); addQuotas(connection, getNamespaceRowKey(namespace), data);
} }
public static void deleteNamespaceQuota(final Configuration conf, final String namespace) public static void deleteNamespaceQuota(final Connection connection, final String namespace)
throws IOException { throws IOException {
deleteQuotas(conf, getNamespaceRowKey(namespace)); deleteQuotas(connection, getNamespaceRowKey(namespace));
} }
public static void addUserQuota(final Configuration conf, final String user, public static void addUserQuota(final Connection connection, final String user,
final Quotas data) throws IOException { final Quotas data) throws IOException {
addQuotas(conf, getUserRowKey(user), data); addQuotas(connection, getUserRowKey(user), data);
} }
public static void addUserQuota(final Configuration conf, final String user, public static void addUserQuota(final Connection connection, final String user,
final TableName table, final Quotas data) throws IOException { final TableName table, final Quotas data) throws IOException {
addQuotas(conf, getUserRowKey(user), addQuotas(connection, getUserRowKey(user), getSettingsQualifierForUserTable(table), data);
getSettingsQualifierForUserTable(table), data);
} }
public static void addUserQuota(final Configuration conf, final String user, public static void addUserQuota(final Connection connection, final String user,
final String namespace, final Quotas data) throws IOException { final String namespace, final Quotas data) throws IOException {
addQuotas(conf, getUserRowKey(user), addQuotas(connection, getUserRowKey(user),
getSettingsQualifierForUserNamespace(namespace), data); getSettingsQualifierForUserNamespace(namespace), data);
} }
public static void deleteUserQuota(final Configuration conf, final String user) public static void deleteUserQuota(final Connection connection, final String user)
throws IOException { throws IOException {
deleteQuotas(conf, getUserRowKey(user)); deleteQuotas(connection, getUserRowKey(user));
} }
public static void deleteUserQuota(final Configuration conf, final String user, public static void deleteUserQuota(final Connection connection, final String user,
final TableName table) throws IOException { final TableName table) throws IOException {
deleteQuotas(conf, getUserRowKey(user), deleteQuotas(connection, getUserRowKey(user),
getSettingsQualifierForUserTable(table)); getSettingsQualifierForUserTable(table));
} }
public static void deleteUserQuota(final Configuration conf, final String user, public static void deleteUserQuota(final Connection connection, final String user,
final String namespace) throws IOException { final String namespace) throws IOException {
deleteQuotas(conf, getUserRowKey(user), deleteQuotas(connection, getUserRowKey(user),
getSettingsQualifierForUserNamespace(namespace)); getSettingsQualifierForUserNamespace(namespace));
} }
private static void addQuotas(final Configuration conf, final byte[] rowKey, private static void addQuotas(final Connection connection, final byte[] rowKey,
final Quotas data) throws IOException { final Quotas data) throws IOException {
addQuotas(conf, rowKey, QUOTA_QUALIFIER_SETTINGS, data); addQuotas(connection, rowKey, QUOTA_QUALIFIER_SETTINGS, data);
} }
private static void addQuotas(final Configuration conf, final byte[] rowKey, private static void addQuotas(final Connection connection, final byte[] rowKey,
final byte[] qualifier, final Quotas data) throws IOException { final byte[] qualifier, final Quotas data) throws IOException {
Put put = new Put(rowKey); Put put = new Put(rowKey);
put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data)); put.add(QUOTA_FAMILY_INFO, qualifier, quotasToData(data));
doPut(conf, put); doPut(connection, put);
} }
private static void deleteQuotas(final Configuration conf, final byte[] rowKey) private static void deleteQuotas(final Connection connection, final byte[] rowKey)
throws IOException { throws IOException {
deleteQuotas(conf, rowKey, null); deleteQuotas(connection, rowKey, null);
} }
private static void deleteQuotas(final Configuration conf, final byte[] rowKey, private static void deleteQuotas(final Connection connection, final byte[] rowKey,
final byte[] qualifier) throws IOException { final byte[] qualifier) throws IOException {
Delete delete = new Delete(rowKey); Delete delete = new Delete(rowKey);
if (qualifier != null) { if (qualifier != null) {
delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier); delete.deleteColumns(QUOTA_FAMILY_INFO, qualifier);
} }
doDelete(conf, delete); doDelete(connection, delete);
} }
public static Map<String, UserQuotaState> fetchUserQuotas(final Configuration conf, public static Map<String, UserQuotaState> fetchUserQuotas(final Connection connection,
final List<Get> gets) throws IOException { final List<Get> gets) throws IOException {
long nowTs = EnvironmentEdgeManager.currentTime(); long nowTs = EnvironmentEdgeManager.currentTime();
Result[] results = doGet(conf, gets); Result[] results = doGet(connection, gets);
Map<String, UserQuotaState> userQuotas = new HashMap<String, UserQuotaState>(results.length); Map<String, UserQuotaState> userQuotas = new HashMap<String, UserQuotaState>(results.length);
for (int i = 0; i < results.length; ++i) { for (int i = 0; i < results.length; ++i) {
@ -207,9 +204,9 @@ public class QuotaUtil extends QuotaTableUtil {
return userQuotas; return userQuotas;
} }
public static Map<TableName, QuotaState> fetchTableQuotas(final Configuration conf, public static Map<TableName, QuotaState> fetchTableQuotas(final Connection connection,
final List<Get> gets) throws IOException { final List<Get> gets) throws IOException {
return fetchGlobalQuotas("table", conf, gets, new KeyFromRow<TableName>() { return fetchGlobalQuotas("table", connection, gets, new KeyFromRow<TableName>() {
@Override @Override
public TableName getKeyFromRow(final byte[] row) { public TableName getKeyFromRow(final byte[] row) {
assert isTableRowKey(row); assert isTableRowKey(row);
@ -218,9 +215,9 @@ public class QuotaUtil extends QuotaTableUtil {
}); });
} }
public static Map<String, QuotaState> fetchNamespaceQuotas(final Configuration conf, public static Map<String, QuotaState> fetchNamespaceQuotas(final Connection connection,
final List<Get> gets) throws IOException { final List<Get> gets) throws IOException {
return fetchGlobalQuotas("namespace", conf, gets, new KeyFromRow<String>() { return fetchGlobalQuotas("namespace", connection, gets, new KeyFromRow<String>() {
@Override @Override
public String getKeyFromRow(final byte[] row) { public String getKeyFromRow(final byte[] row) {
assert isNamespaceRowKey(row); assert isNamespaceRowKey(row);
@ -230,9 +227,10 @@ public class QuotaUtil extends QuotaTableUtil {
} }
public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type, public static <K> Map<K, QuotaState> fetchGlobalQuotas(final String type,
final Configuration conf, final List<Get> gets, final KeyFromRow<K> kfr) throws IOException { final Connection connection, final List<Get> gets, final KeyFromRow<K> kfr)
throws IOException {
long nowTs = EnvironmentEdgeManager.currentTime(); long nowTs = EnvironmentEdgeManager.currentTime();
Result[] results = doGet(conf, gets); Result[] results = doGet(connection, gets);
Map<K, QuotaState> globalQuotas = new HashMap<K, QuotaState>(results.length); Map<K, QuotaState> globalQuotas = new HashMap<K, QuotaState>(results.length);
for (int i = 0; i < results.length; ++i) { for (int i = 0; i < results.length; ++i) {
@ -266,23 +264,17 @@ public class QuotaUtil extends QuotaTableUtil {
/* ========================================================================= /* =========================================================================
* HTable helpers * HTable helpers
*/ */
-  private static void doPut(final Configuration conf, final Put put)
-      throws IOException {
-    HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME);
-    try {
-      table.put(put);
-    } finally {
-      table.close();
-    }
-  }
+  private static void doPut(final Connection connection, final Put put)
+      throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
+      table.put(put);
+    }
+  }

-  private static void doDelete(final Configuration conf, final Delete delete)
-      throws IOException {
-    HTable table = new HTable(conf, QuotaUtil.QUOTA_TABLE_NAME);
-    try {
-      table.delete(delete);
-    } finally {
-      table.close();
-    }
-  }
+  private static void doDelete(final Connection connection, final Delete delete)
+      throws IOException {
+    try (Table table = connection.getTable(QuotaUtil.QUOTA_TABLE_NAME)) {
+      table.delete(delete);
+    }
+  }
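Aside: the shape above -- the caller owns the Connection, the utility opens a short-lived Table and lets try-with-resources close it -- is the pattern used throughout this change. A minimal standalone sketch of that shape (class and method names here are illustrative, not from the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class QuotaPutExample {
      private QuotaPutExample() {}

      /**
       * Write one Put to the named table. The Connection belongs to the caller and is NOT
       * closed here; only the lightweight Table wrapper is closed, via try-with-resources.
       */
      public static void doPut(Connection connection, TableName tableName, Put put)
          throws IOException {
        try (Table table = connection.getTable(tableName)) {
          table.put(put);
        }
      }
    }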


@ -76,9 +76,9 @@ import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination; import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@ -207,12 +207,12 @@ public class HRegionServer extends HasThread implements
protected HeapMemoryManager hMemManager; protected HeapMemoryManager hMemManager;
-  /*
-   * Short-circuit (ie. bypassing RPC layer) HConnection to this Server
-   * to be used internally for miscellaneous needs. Initialized at the server startup
-   * and closed when server shuts down. Clients must never close it explicitly.
-   */
-  protected HConnection shortCircuitConnection;
+  /**
+   * Cluster connection to be shared by services.
+   * Initialized at server startup and closed when server shuts down.
+   * Clients must never close it explicitly.
+   */
+  protected ClusterConnection clusterConnection;
/* /*
* Long-living meta table locator, which is created when the server is started and stopped * Long-living meta table locator, which is created when the server is started and stopped
@ -605,11 +605,16 @@ public class HRegionServer extends HasThread implements
} }
-  /**
-   * Create wrapped short-circuit connection to this server.
-   * In its own method so can intercept and mock it over in tests.
-   * @throws IOException
-   */
-  protected HConnection createShortCircuitConnection() throws IOException {
-    return ConnectionUtils.createShortCircuitHConnection(
-      ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices);
-  }
+  /**
+   * Create a 'smarter' HConnection, one that is capable of by-passing RPC if the request is to
+   * the local server. Safe to use going to local or remote server.
+   * Create this instance in a method can be intercepted and mocked in tests.
+   * @throws IOException
+   */
+  @VisibleForTesting
+  protected ClusterConnection createClusterConnection() throws IOException {
+    // Create a cluster connection that when appropriate, can short-circuit and go directly to the
+    // local server if the request is to the local server bypassing RPC. Can be used for both local
+    // and remote invocations.
+    return ConnectionUtils.createShortCircuitHConnection(
+      ConnectionFactory.createConnection(conf), serverName, rpcServices, rpcServices);
+  }
@ -635,6 +640,17 @@ public class HRegionServer extends HasThread implements
return this.clusterId; return this.clusterId;
} }
/**
* Setup our cluster connection if not already initialized.
* @throws IOException
*/
protected synchronized void setupClusterConnection() throws IOException {
if (clusterConnection == null) {
clusterConnection = createClusterConnection();
metaTableLocator = new MetaTableLocator();
}
}
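Aside: the lazy, synchronized setup plus an overridable factory method is what lets tests swap in a mock connection. A hedged sketch of the same idea using only the public client API (the holder class below is hypothetical, not the actual HRegionServer code):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    /** Hypothetical holder showing lazy setup plus an overridable factory for tests. */
    public class LazyConnectionHolder {
      protected final Configuration conf;
      protected Connection connection;

      public LazyConnectionHolder(Configuration conf) {
        this.conf = conf;
      }

      /** Factory method; a test subclass can override this to return a mock Connection. */
      protected Connection createConnection() throws IOException {
        return ConnectionFactory.createConnection(conf);
      }

      /** Idempotent: only the first caller actually creates the connection. */
      public synchronized void setupConnection() throws IOException {
        if (connection == null) {
          connection = createConnection();
        }
      }

      public synchronized Connection getConnection() {
        return connection;
      }
    }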
/** /**
* All initialization needed before we go register with Master. * All initialization needed before we go register with Master.
* *
@ -643,12 +659,7 @@ public class HRegionServer extends HasThread implements
*/ */
private void preRegistrationInitialization(){ private void preRegistrationInitialization(){
try { try {
-      synchronized (this) {
-        if (shortCircuitConnection == null) {
-          shortCircuitConnection = createShortCircuitConnection();
-          metaTableLocator = new MetaTableLocator();
-        }
-      }
+      setupClusterConnection();
// Health checker thread. // Health checker thread.
if (isHealthCheckerConfigured()) { if (isHealthCheckerConfigured()) {
@ -946,13 +957,13 @@ public class HRegionServer extends HasThread implements
// so callers waiting for meta without timeout can stop // so callers waiting for meta without timeout can stop
if (this.metaTableLocator != null) this.metaTableLocator.stop(); if (this.metaTableLocator != null) this.metaTableLocator.stop();
if (this.shortCircuitConnection != null && !shortCircuitConnection.isClosed()) { if (this.clusterConnection != null && !clusterConnection.isClosed()) {
try { try {
this.shortCircuitConnection.close(); this.clusterConnection.close();
} catch (IOException e) { } catch (IOException e) {
// Although the {@link Closeable} interface throws an {@link // Although the {@link Closeable} interface throws an {@link
// IOException}, in reality, the implementation would never do that. // IOException}, in reality, the implementation would never do that.
LOG.error("Attempt to close server's short circuit HConnection failed.", e); LOG.warn("Attempt to close server's short circuit HConnection failed.", e);
} }
} }
@ -1737,8 +1748,8 @@ public class HRegionServer extends HasThread implements
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return this.shortCircuitConnection; return this.clusterConnection;
} }
@Override @Override
@ -1829,7 +1840,7 @@ public class HRegionServer extends HasThread implements
} }
} else { } else {
try { try {
MetaTableAccessor.updateRegionLocation(shortCircuitConnection, MetaTableAccessor.updateRegionLocation(clusterConnection,
hris[0], serverName, openSeqNum); hris[0], serverName, openSeqNum);
} catch (IOException e) { } catch (IOException e) {
LOG.info("Failed to update meta", e); LOG.info("Failed to update meta", e);
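Aside: with the rename, internal callers simply take whatever getConnection() hands back and never close it; only the per-call Table is closed. Illustrative sketch (the helper class and method are made up for the example):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;

    final class ServerSideReadExample {
      private ServerSideReadExample() {}

      /**
       * 'connection' stands in for whatever getConnection() returns on the server; the caller
       * must NOT close it. Only the per-call Table wrapper is closed here.
       */
      static Result readRow(Connection connection, TableName tableName, byte[] row)
          throws IOException {
        try (Table table = connection.getTable(tableName)) {
          return table.get(new Get(row));
        }
      }
    }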


@ -658,7 +658,7 @@ public class RegionMergeTransaction {
// Get merge regions if it is a merged region and already has merge // Get merge regions if it is a merged region and already has merge
// qualifier // qualifier
Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
.getRegionsFromMergeQualifier(services.getShortCircuitConnection(), regionName); .getRegionsFromMergeQualifier(services.getConnection(), regionName);
if (mergeRegions != null && if (mergeRegions != null &&
(mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) { (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
// It has merge qualifier // It has merge qualifier


@ -24,12 +24,12 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -149,11 +149,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
return null; return null;
} }
@Override
public HConnection getShortCircuitConnection() {
return null;
}
@Override @Override
public MetaTableLocator getMetaTableLocator() { public MetaTableLocator getMetaTableLocator() {
return null; return null;
@ -181,5 +176,10 @@ public class ReplicationSyncUp extends Configured implements Tool {
public boolean isStopped() { public boolean isStopped() {
return false; return false;
} }
@Override
public ClusterConnection getConnection() {
return null;
}
} }
} }


@ -33,7 +33,6 @@ import java.util.TreeSet;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
@ -44,9 +43,11 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
@ -170,12 +171,11 @@ public class AccessControlLists {
Bytes.toString(key)+": "+Bytes.toStringBinary(value) Bytes.toString(key)+": "+Bytes.toStringBinary(value)
); );
} }
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.put(p);
-    } finally {
-      if (acls != null) acls.close();
-    }
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.put(p);
+      }
+    }
   }
@ -200,13 +200,12 @@ public class AccessControlLists {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Removing permission "+ userPerm.toString()); LOG.debug("Removing permission "+ userPerm.toString());
} }
-    d.deleteColumns(ACL_LIST_FAMILY, key);
+    d.addColumns(ACL_LIST_FAMILY, key);
-    Table acls = null;
-    try {
-      acls = new HTable(conf, ACL_TABLE_NAME);
-      acls.delete(d);
-    } finally {
-      if (acls != null) acls.close();
-    }
+    // TODO: Pass in a Connection rather than create one each time.
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+        table.delete(d);
+      }
+    }
   }
@ -220,13 +219,11 @@ public class AccessControlLists {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Removing permissions of removed table "+ tableName); LOG.debug("Removing permissions of removed table "+ tableName);
} }
// TODO: Pass in a Connection rather than create one each time.
Table acls = null; try (Connection connection = ConnectionFactory.createConnection(conf)) {
try { try (Table table = connection.getTable(ACL_TABLE_NAME)) {
acls = new HTable(conf, ACL_TABLE_NAME); table.delete(d);
acls.delete(d); }
} finally {
if (acls != null) acls.close();
} }
} }
@ -241,12 +238,10 @@ public class AccessControlLists {
LOG.debug("Removing permissions of removed namespace "+ namespace); LOG.debug("Removing permissions of removed namespace "+ namespace);
} }
Table acls = null; try (Connection connection = ConnectionFactory.createConnection(conf)) {
try { try (Table table = connection.getTable(ACL_TABLE_NAME)) {
acls = new HTable(conf, ACL_TABLE_NAME); table.delete(d);
acls.delete(d); }
} finally {
if (acls != null) acls.close();
} }
} }
@ -260,11 +255,9 @@ public class AccessControlLists {
LOG.debug("Removing permissions of removed column " + Bytes.toString(column) + LOG.debug("Removing permissions of removed column " + Bytes.toString(column) +
" from table "+ tableName); " from table "+ tableName);
} }
// TODO: Pass in a Connection rather than create one each time.
Table acls = null; try (Connection connection = ConnectionFactory.createConnection(conf)) {
try { try (Table table = connection.getTable(ACL_TABLE_NAME)) {
acls = new HTable(conf, ACL_TABLE_NAME);
Scan scan = new Scan(); Scan scan = new Scan();
scan.addFamily(ACL_LIST_FAMILY); scan.addFamily(ACL_LIST_FAMILY);
@ -275,7 +268,7 @@ public class AccessControlLists {
ACL_KEY_DELIMITER, columnName)))); ACL_KEY_DELIMITER, columnName))));
Set<byte[]> qualifierSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR); Set<byte[]> qualifierSet = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
ResultScanner scanner = acls.getScanner(scan); ResultScanner scanner = table.getScanner(scan);
try { try {
for (Result res : scanner) { for (Result res : scanner) {
for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) { for (byte[] q : res.getFamilyMap(ACL_LIST_FAMILY).navigableKeySet()) {
@ -289,12 +282,11 @@ public class AccessControlLists {
if (qualifierSet.size() > 0) { if (qualifierSet.size() > 0) {
Delete d = new Delete(tableName.getName()); Delete d = new Delete(tableName.getName());
for (byte[] qualifier : qualifierSet) { for (byte[] qualifier : qualifierSet) {
d.deleteColumns(ACL_LIST_FAMILY, qualifier); d.addColumns(ACL_LIST_FAMILY, qualifier);
}
table.delete(d);
} }
acls.delete(d);
} }
} finally {
if (acls != null) acls.close();
} }
} }
@ -422,19 +414,20 @@ public class AccessControlLists {
Scan scan = new Scan(); Scan scan = new Scan();
scan.addFamily(ACL_LIST_FAMILY); scan.addFamily(ACL_LIST_FAMILY);
Table acls = null;
ResultScanner scanner = null; ResultScanner scanner = null;
// TODO: Pass in a Connection rather than create one each time.
try (Connection connection = ConnectionFactory.createConnection(conf)) {
try (Table table = connection.getTable(ACL_TABLE_NAME)) {
scanner = table.getScanner(scan);
try { try {
acls = new HTable(conf, ACL_TABLE_NAME);
scanner = acls.getScanner(scan);
for (Result row : scanner) { for (Result row : scanner) {
ListMultimap<String,TablePermission> resultPerms = ListMultimap<String,TablePermission> resultPerms = parsePermissions(row.getRow(), row);
parsePermissions(row.getRow(), row);
allPerms.put(row.getRow(), resultPerms); allPerms.put(row.getRow(), resultPerms);
} }
} finally { } finally {
if (scanner != null) scanner.close(); if (scanner != null) scanner.close();
if (acls != null) acls.close(); }
}
} }
return allPerms; return allPerms;
@ -465,20 +458,19 @@ public class AccessControlLists {
// for normal user tables, we just read the table row from _acl_ // for normal user tables, we just read the table row from _acl_
ListMultimap<String, TablePermission> perms = ArrayListMultimap.create(); ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
Table acls = null; // TODO: Pass in a Connection rather than create one each time.
try { try (Connection connection = ConnectionFactory.createConnection(conf)) {
acls = new HTable(conf, ACL_TABLE_NAME); try (Table table = connection.getTable(ACL_TABLE_NAME)) {
Get get = new Get(entryName); Get get = new Get(entryName);
get.addFamily(ACL_LIST_FAMILY); get.addFamily(ACL_LIST_FAMILY);
Result row = acls.get(get); Result row = table.get(get);
if (!row.isEmpty()) { if (!row.isEmpty()) {
perms = parsePermissions(entryName, row); perms = parsePermissions(entryName, row);
} else { } else {
LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry " LOG.info("No permissions found in " + ACL_TABLE_NAME + " for acl entry "
+ Bytes.toString(entryName)); + Bytes.toString(entryName));
} }
} finally { }
if (acls != null) acls.close();
} }
return perms; return perms;
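Aside: the read path is the same nested try-with-resources shape. A self-contained sketch of reading one row of one family; the table and family names below are placeholders, not the real ACL constants:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class AclRowReadExample {
      private AclRowReadExample() {}

      /** Read one row of one family from a system table, opening and closing everything locally. */
      public static Result readAclRow(Configuration conf, byte[] entryName) throws IOException {
        TableName aclTable = TableName.valueOf("hbase", "acl"); // illustrative; use ACL_TABLE_NAME
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(aclTable)) {
          Get get = new Get(entryName);
          get.addFamily(Bytes.toBytes("l")); // 'l' stands in for the ACL list family in this sketch
          return table.get(get);
        }
      }
    }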


@ -1087,7 +1087,7 @@ public class AccessController extends BaseMasterAndRegionObserver
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx)
throws IOException { throws IOException {
if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices() if (!MetaTableAccessor.tableExists(ctx.getEnvironment().getMasterServices()
.getShortCircuitConnection(), AccessControlLists.ACL_TABLE_NAME)) { .getConnection(), AccessControlLists.ACL_TABLE_NAME)) {
// initialize the ACL storage table // initialize the ACL storage table
AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices()); AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices());
} else { } else {


@ -23,13 +23,15 @@ import java.lang.reflect.UndeclaredThrowableException;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import com.google.protobuf.ServiceException; import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@ -45,6 +47,7 @@ import org.apache.hadoop.security.token.Token;
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class TokenUtil { public class TokenUtil {
// This class is referenced indirectly by User out in common; instances are created by reflection
private static Log LOG = LogFactory.getLog(TokenUtil.class); private static Log LOG = LogFactory.getLog(TokenUtil.class);
/** /**
@ -54,21 +57,19 @@ public class TokenUtil {
*/ */
public static Token<AuthenticationTokenIdentifier> obtainToken( public static Token<AuthenticationTokenIdentifier> obtainToken(
Configuration conf) throws IOException { Configuration conf) throws IOException {
-    Table meta = null;
-    try {
-      meta = new HTable(conf, TableName.META_TABLE_NAME);
+    // TODO: Pass in a Connection to used. Will this even work?
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
         CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW);
         AuthenticationProtos.AuthenticationService.BlockingInterface service =
             AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel);
-      AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken(null,
+        AuthenticationProtos.GetAuthenticationTokenResponse response =
+            service.getAuthenticationToken(null,
               AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance());
         return ProtobufUtil.toToken(response.getToken());
       } catch (ServiceException se) {
         ProtobufUtil.toIOException(se);
-    } finally {
-      if (meta != null) {
-        meta.close();
-      }
       }
     }
// dummy return for ServiceException catch block // dummy return for ServiceException catch block


@ -168,7 +168,7 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
// Need to create the new system table for labels here // Need to create the new system table for labels here
MasterServices master = ctx.getEnvironment().getMasterServices(); MasterServices master = ctx.getEnvironment().getMasterServices();
if (!MetaTableAccessor.tableExists(master.getShortCircuitConnection(), LABELS_TABLE_NAME)) { if (!MetaTableAccessor.tableExists(master.getConnection(), LABELS_TABLE_NAME)) {
HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME); HTableDescriptor labelsTable = new HTableDescriptor(LABELS_TABLE_NAME);
HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY); HColumnDescriptor labelsColumn = new HColumnDescriptor(LABELS_TABLE_FAMILY);
labelsColumn.setBloomFilterType(BloomType.NONE); labelsColumn.setBloomFilterType(BloomType.NONE);


@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.tool; package org.apache.hadoop.hbase.tool;
import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -39,15 +40,17 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
@ -228,15 +231,19 @@ public final class Canary implements Tool {
} }
} }
// start to prepare the stuffs // Start to prepare the stuffs
Monitor monitor = null; Monitor monitor = null;
Thread monitorThread = null; Thread monitorThread = null;
long startTime = 0; long startTime = 0;
long currentTimeLength = 0; long currentTimeLength = 0;
// Get a connection to use in below.
// try-with-resources jdk7 construct. See
// http://docs.oracle.com/javase/tutorial/essential/exceptions/tryResourceClose.html
try (Connection connection = ConnectionFactory.createConnection(this.conf)) {
do { do {
// do monitor !! // Do monitor !!
monitor = this.newMonitor(index, args); try {
monitor = this.newMonitor(connection, index, args);
monitorThread = new Thread(monitor); monitorThread = new Thread(monitor);
startTime = System.currentTimeMillis(); startTime = System.currentTimeMillis();
monitorThread.start(); monitorThread.start();
@ -270,9 +277,13 @@ public final class Canary implements Tool {
monitorThread.interrupt(); monitorThread.interrupt();
System.exit(monitor.errorCode); System.exit(monitor.errorCode);
} }
} finally {
if (monitor != null) monitor.close();
}
Thread.sleep(interval); Thread.sleep(interval);
} while (interval > 0); } while (interval > 0);
} // try-with-resources close
return(monitor.errorCode); return(monitor.errorCode);
} }
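Aside: the point of the change above is that one Connection now spans the whole polling loop instead of being re-created per iteration. A runnable sketch of that shape (interval and iteration count are made up so the example terminates):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class PollingLoopExample {
      public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = HBaseConfiguration.create();
        long intervalMs = 10_000L; // illustrative poll interval
        int iterations = 3;        // bounded here so the sketch terminates
        // One Connection for the whole loop; closed once by try-with-resources.
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          for (int i = 0; i < iterations; i++) {
            try (Admin admin = connection.getAdmin()) {
              System.out.println("Live servers: " + admin.getClusterStatus().getServersSize());
            }
            Thread.sleep(intervalMs);
          }
        }
      }
    }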
@ -296,13 +307,13 @@ public final class Canary implements Tool {
} }
/** /**
* a Factory method for {@link Monitor}. * A Factory method for {@link Monitor}.
* Can be overrided by user. * Can be overridden by user.
* @param index a start index for monitor target * @param index a start index for monitor target
* @param args args passed from user * @param args args passed from user
* @return a Monitor instance * @return a Monitor instance
*/ */
public Monitor newMonitor(int index, String[] args) { public Monitor newMonitor(final Connection connection, int index, String[] args) {
Monitor monitor = null; Monitor monitor = null;
String[] monitorTargets = null; String[] monitorTargets = null;
@ -314,20 +325,20 @@ public final class Canary implements Tool {
if(this.regionServerMode) { if(this.regionServerMode) {
monitor = new RegionServerMonitor( monitor = new RegionServerMonitor(
this.conf, connection,
monitorTargets, monitorTargets,
this.useRegExp, this.useRegExp,
(ExtendedSink)this.sink); (ExtendedSink)this.sink);
} else { } else {
monitor = new RegionMonitor(this.conf, monitorTargets, this.useRegExp, this.sink); monitor = new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink);
} }
return monitor; return monitor;
} }
// a Monitor super-class can be extended by users // a Monitor super-class can be extended by users
public static abstract class Monitor implements Runnable { public static abstract class Monitor implements Runnable, Closeable {
protected Configuration config; protected Connection connection;
protected Admin admin; protected Admin admin;
protected String[] targets; protected String[] targets;
protected boolean useRegExp; protected boolean useRegExp;
@ -345,12 +356,16 @@ public final class Canary implements Tool {
return errorCode != 0; return errorCode != 0;
} }
-    protected Monitor(Configuration config, String[] monitorTargets,
-        boolean useRegExp, Sink sink) {
-      if (null == config)
-        throw new IllegalArgumentException("config shall not be null");
-      this.config = config;
+    @Override
+    public void close() throws IOException {
+      if (this.admin != null) this.admin.close();
+    }
+
+    protected Monitor(Connection connection, String[] monitorTargets,
+        boolean useRegExp, Sink sink) {
+      if (null == connection) throw new IllegalArgumentException("connection shall not be null");
+      this.connection = connection;
       this.targets = monitorTargets;
       this.useRegExp = useRegExp;
       this.sink = sink;
@ -361,7 +376,7 @@ public final class Canary implements Tool {
protected boolean initAdmin() { protected boolean initAdmin() {
if (null == this.admin) { if (null == this.admin) {
try { try {
this.admin = new HBaseAdmin(config); this.admin = this.connection.getAdmin();
} catch (Exception e) { } catch (Exception e) {
LOG.error("Initial HBaseAdmin failed...", e); LOG.error("Initial HBaseAdmin failed...", e);
this.errorCode = INIT_ERROR_EXIT_CODE; this.errorCode = INIT_ERROR_EXIT_CODE;
@ -377,9 +392,9 @@ public final class Canary implements Tool {
// a monitor for region mode // a monitor for region mode
private static class RegionMonitor extends Monitor { private static class RegionMonitor extends Monitor {
public RegionMonitor(Configuration config, String[] monitorTargets, public RegionMonitor(Connection connection, String[] monitorTargets,
boolean useRegExp, Sink sink) { boolean useRegExp, Sink sink) {
super(config, monitorTargets, useRegExp, sink); super(connection, monitorTargets, useRegExp, sink);
} }
@Override @Override
@ -481,7 +496,7 @@ public final class Canary implements Tool {
Table table = null; Table table = null;
try { try {
table = new HTable(admin.getConfiguration(), tableDesc.getTableName()); table = admin.getConnection().getTable(tableDesc.getTableName());
} catch (TableNotFoundException e) { } catch (TableNotFoundException e) {
return; return;
} }
@ -556,9 +571,9 @@ public final class Canary implements Tool {
//a monitor for regionserver mode //a monitor for regionserver mode
private static class RegionServerMonitor extends Monitor { private static class RegionServerMonitor extends Monitor {
public RegionServerMonitor(Configuration config, String[] monitorTargets, public RegionServerMonitor(Connection connection, String[] monitorTargets,
boolean useRegExp, ExtendedSink sink) { boolean useRegExp, ExtendedSink sink) {
super(config, monitorTargets, useRegExp, sink); super(connection, monitorTargets, useRegExp, sink);
} }
private ExtendedSink getSink() { private ExtendedSink getSink() {
@ -622,7 +637,7 @@ public final class Canary implements Tool {
region = entry.getValue().get(0); region = entry.getValue().get(0);
try { try {
tableName = region.getTable(); tableName = region.getTable();
table = new HTable(this.admin.getConfiguration(), tableName); table = admin.getConnection().getTable(tableName);
startKey = region.getStartKey(); startKey = region.getStartKey();
// Can't do a get on empty start row so do a Scan of first element if any instead. // Can't do a get on empty start row so do a Scan of first element if any instead.
if(startKey.length > 0) { if(startKey.length > 0) {
@ -675,18 +690,19 @@ public final class Canary implements Tool {
private Map<String, List<HRegionInfo>> getAllRegionServerByName() { private Map<String, List<HRegionInfo>> getAllRegionServerByName() {
Map<String, List<HRegionInfo>> rsAndRMap = new HashMap<String, List<HRegionInfo>>(); Map<String, List<HRegionInfo>> rsAndRMap = new HashMap<String, List<HRegionInfo>>();
-      HTable table = null;
+      Table table = null;
+      RegionLocator regionLocator = null;
       try {
         HTableDescriptor[] tableDescs = this.admin.listTables();
         List<HRegionInfo> regions = null;
         for (HTableDescriptor tableDesc : tableDescs) {
-          table = new HTable(this.admin.getConfiguration(), tableDesc.getTableName());
-          for (Map.Entry<HRegionInfo, ServerName> entry : table
-              .getRegionLocations().entrySet()) {
-            ServerName rs = entry.getValue();
+          table = this.admin.getConnection().getTable(tableDesc.getTableName());
+          regionLocator = this.admin.getConnection().getRegionLocator(tableDesc.getTableName());
+          for (HRegionLocation location: regionLocator.getAllRegionLocations()) {
+            ServerName rs = location.getServerName();
             String rsName = rs.getHostname();
-            HRegionInfo r = entry.getKey();
+            HRegionInfo r = location.getRegionInfo();
             if (rsAndRMap.containsKey(rsName)) {
               regions = rsAndRMap.get(rsName);
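Aside: RegionLocator#getAllRegionLocations replaces HTable#getRegionLocations for this kind of grouping. A sketch of building the host-to-regions map with the public API (the class name is illustrative):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class RegionsByServerExample {
      private RegionsByServerExample() {}

      /** Group a table's regions by the host currently serving them. */
      static Map<String, List<HRegionInfo>> regionsByHost(Connection connection, TableName tableName)
          throws IOException {
        Map<String, List<HRegionInfo>> byHost = new HashMap<String, List<HRegionInfo>>();
        try (RegionLocator locator = connection.getRegionLocator(tableName)) {
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            String host = location.getServerName().getHostname();
            List<HRegionInfo> regions = byHost.get(host);
            if (regions == null) {
              regions = new ArrayList<HRegionInfo>();
              byHost.put(host, regions);
            }
            regions.add(location.getRegionInfo());
          }
        }
        return byHost;
      }
    }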


@ -21,11 +21,11 @@ package org.apache.hadoop.hbase.util;
import java.io.IOException; import java.io.IOException;
import java.math.BigInteger; import java.math.BigInteger;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
@ -39,23 +39,28 @@ import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.NoServerForRegionException; import org.apache.hadoop.hbase.client.NoServerForRegionException;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
@ -364,8 +369,8 @@ public class RegionSplitter {
} }
static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo, static void createPresplitTable(TableName tableName, SplitAlgorithm splitAlgo,
String[] columnFamilies, Configuration conf) throws IOException, String[] columnFamilies, Configuration conf)
InterruptedException { throws IOException, InterruptedException {
final int splitCount = conf.getInt("split.count", 0); final int splitCount = conf.getInt("split.count", 0);
Preconditions.checkArgument(splitCount > 1, "Split count must be > 1"); Preconditions.checkArgument(splitCount > 1, "Split count must be > 1");
@ -378,7 +383,8 @@ public class RegionSplitter {
for (String cf : columnFamilies) { for (String cf : columnFamilies) {
desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf))); desc.addFamily(new HColumnDescriptor(Bytes.toBytes(cf)));
} }
HBaseAdmin admin = new HBaseAdmin(conf); try (Connection connection = ConnectionFactory.createConnection(conf)) {
Admin admin = connection.getAdmin();
try { try {
Preconditions.checkArgument(!admin.tableExists(tableName), Preconditions.checkArgument(!admin.tableExists(tableName),
"Table already exists: " + tableName); "Table already exists: " + tableName);
@ -391,34 +397,59 @@ public class RegionSplitter {
// NOTE: createTable is synchronous on the table, but not on the regions // NOTE: createTable is synchronous on the table, but not on the regions
int onlineRegions = 0; int onlineRegions = 0;
while (onlineRegions < splitCount) { while (onlineRegions < splitCount) {
onlineRegions = MetaTableAccessor.getRegionCount(conf, tableName); onlineRegions = MetaTableAccessor.getRegionCount(connection, tableName);
LOG.debug(onlineRegions + " of " + splitCount + " regions online..."); LOG.debug(onlineRegions + " of " + splitCount + " regions online...");
if (onlineRegions < splitCount) { if (onlineRegions < splitCount) {
Thread.sleep(10 * 1000); // sleep Thread.sleep(10 * 1000); // sleep
} }
} }
} }
LOG.debug("Finished creating table with " + splitCount + " regions"); LOG.debug("Finished creating table with " + splitCount + " regions");
} }
}
static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, /**
Configuration conf) throws IOException, InterruptedException { * Alternative getCurrentNrHRS which is no longer available.
* @param connection
* @return Rough count of regionservers out on cluster.
* @throws IOException
*/
private static int getRegionServerCount(final Connection connection) throws IOException {
try (Admin admin = connection.getAdmin()) {
ClusterStatus status = admin.getClusterStatus();
Collection<ServerName> servers = status.getServers();
return servers == null || servers.isEmpty()? 0: servers.size();
}
}
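Aside: the helper above is private to RegionSplitter; outside of it the same count can be derived straight from ClusterStatus. Hedged sketch (helper class is illustrative), useful e.g. as Math.max(regionServerCount(connection) / 2, minOutstanding) to reproduce the MAX_OUTSTANDING sizing below:

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class ServerCountExample {
      private ServerCountExample() {}

      /** Rough count of live region servers, taken from ClusterStatus via a short-lived Admin. */
      static int regionServerCount(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          Collection<ServerName> servers = admin.getClusterStatus().getServers();
          return servers == null || servers.isEmpty() ? 0 : servers.size();
        }
      }
    }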
private static byte [] readFile(final FileSystem fs, final Path path) throws IOException {
FSDataInputStream tmpIn = fs.open(path);
try {
byte [] rawData = new byte[tmpIn.available()];
tmpIn.readFully(rawData);
return rawData;
} finally {
tmpIn.close();
}
}
static void rollingSplit(TableName tableName, SplitAlgorithm splitAlgo, Configuration conf)
throws IOException, InterruptedException {
final int minOS = conf.getInt("split.outstanding", 2); final int minOS = conf.getInt("split.outstanding", 2);
-    HTable table = new HTable(conf, tableName);
-
-    // max outstanding splits. default == 50% of servers
-    final int MAX_OUTSTANDING =
-        Math.max(table.getConnection().getCurrentNrHRS() / 2, minOS);
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      // Max outstanding splits. default == 50% of servers
+      final int MAX_OUTSTANDING = Math.max(getRegionServerCount(connection) / 2, minOS);

       Path hbDir = FSUtils.getRootDir(conf);
-    Path tableDir = FSUtils.getTableDir(hbDir, table.getName());
+      Path tableDir = FSUtils.getTableDir(hbDir, tableName);
       Path splitFile = new Path(tableDir, "_balancedSplit");
       FileSystem fs = FileSystem.get(conf);

-    // get a list of daughter regions to create
-    LinkedList<Pair<byte[], byte[]>> tmpRegionSet = getSplits(table, splitAlgo);
+      // Get a list of daughter regions to create
+      LinkedList<Pair<byte[], byte[]>> tmpRegionSet = null;
+      try (Table table = connection.getTable(tableName)) {
+        tmpRegionSet = getSplits(connection, tableName, splitAlgo);
+      }
LinkedList<Pair<byte[], byte[]>> outstanding = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> outstanding = Lists.newLinkedList();
int splitCount = 0; int splitCount = 0;
final int origCount = tmpRegionSet.size(); final int origCount = tmpRegionSet.size();
@ -429,9 +460,10 @@ public class RegionSplitter {
LOG.debug("Bucketing regions by regionserver..."); LOG.debug("Bucketing regions by regionserver...");
TreeMap<String, LinkedList<Pair<byte[], byte[]>>> daughterRegions = TreeMap<String, LinkedList<Pair<byte[], byte[]>>> daughterRegions =
Maps.newTreeMap(); Maps.newTreeMap();
// Get a regionLocator. Need it in below.
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
for (Pair<byte[], byte[]> dr : tmpRegionSet) { for (Pair<byte[], byte[]> dr : tmpRegionSet) {
String rsLocation = table.getRegionLocation(dr.getSecond()). String rsLocation = regionLocator.getRegionLocation(dr.getSecond()).getHostnamePort();
getHostnamePort();
if (!daughterRegions.containsKey(rsLocation)) { if (!daughterRegions.containsKey(rsLocation)) {
LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> entry = Lists.newLinkedList();
daughterRegions.put(rsLocation, entry); daughterRegions.put(rsLocation, entry);
@ -441,12 +473,11 @@ public class RegionSplitter {
LOG.debug("Done with bucketing. Split time!"); LOG.debug("Done with bucketing. Split time!");
long startTime = System.currentTimeMillis(); long startTime = System.currentTimeMillis();
// open the split file and modify it as splits finish // Open the split file and modify it as splits finish
FSDataInputStream tmpIn = fs.open(splitFile); byte[] rawData = readFile(fs, splitFile);
byte[] rawData = new byte[tmpIn.available()];
tmpIn.readFully(rawData);
tmpIn.close();
FSDataOutputStream splitOut = fs.create(splitFile); FSDataOutputStream splitOut = fs.create(splitFile);
try {
splitOut.write(rawData); splitOut.write(rawData);
try { try {
@ -454,18 +485,19 @@ public class RegionSplitter {
while (!daughterRegions.isEmpty()) { while (!daughterRegions.isEmpty()) {
LOG.debug(daughterRegions.size() + " RS have regions to splt."); LOG.debug(daughterRegions.size() + " RS have regions to splt.");
// Get RegionServer : region count mapping // Get ServerName to region count mapping
final TreeMap<ServerName, Integer> rsSizes = Maps.newTreeMap(); final TreeMap<ServerName, Integer> rsSizes = Maps.newTreeMap();
Map<HRegionInfo, ServerName> regionsInfo = table.getRegionLocations(); List<HRegionLocation> hrls = regionLocator.getAllRegionLocations();
for (ServerName rs : regionsInfo.values()) { for (HRegionLocation hrl: hrls) {
if (rsSizes.containsKey(rs)) { ServerName sn = hrl.getServerName();
rsSizes.put(rs, rsSizes.get(rs) + 1); if (rsSizes.containsKey(sn)) {
rsSizes.put(sn, rsSizes.get(sn) + 1);
} else { } else {
rsSizes.put(rs, 1); rsSizes.put(sn, 1);
} }
} }
// sort the RS by the number of regions they have // Sort the ServerNames by the number of regions they have
List<String> serversLeft = Lists.newArrayList(daughterRegions .keySet()); List<String> serversLeft = Lists.newArrayList(daughterRegions .keySet());
Collections.sort(serversLeft, new Comparator<String>() { Collections.sort(serversLeft, new Comparator<String>() {
public int compare(String o1, String o2) { public int compare(String o1, String o2) {
@ -473,21 +505,20 @@ public class RegionSplitter {
} }
}); });
// round-robin through the RS list. Choose the lightest-loaded servers // Round-robin through the ServerName list. Choose the lightest-loaded servers
// first to keep the master from load-balancing regions as we split. // first to keep the master from load-balancing regions as we split.
for (String rsLoc : serversLeft) { for (String rsLoc : serversLeft) {
Pair<byte[], byte[]> dr = null; Pair<byte[], byte[]> dr = null;
// find a region in the RS list that hasn't been moved // Find a region in the ServerName list that hasn't been moved
LOG.debug("Finding a region on " + rsLoc); LOG.debug("Finding a region on " + rsLoc);
LinkedList<Pair<byte[], byte[]>> regionList = daughterRegions LinkedList<Pair<byte[], byte[]>> regionList = daughterRegions.get(rsLoc);
.get(rsLoc);
while (!regionList.isEmpty()) { while (!regionList.isEmpty()) {
dr = regionList.pop(); dr = regionList.pop();
// get current region info // get current region info
byte[] split = dr.getSecond(); byte[] split = dr.getSecond();
HRegionLocation regionLoc = table.getRegionLocation(split); HRegionLocation regionLoc = regionLocator.getRegionLocation(split);
// if this region moved locations // if this region moved locations
String newRs = regionLoc.getHostnamePort(); String newRs = regionLoc.getHostnamePort();
@ -531,11 +562,8 @@ public class RegionSplitter {
// we have a good region, time to split! // we have a good region, time to split!
byte[] split = dr.getSecond(); byte[] split = dr.getSecond();
LOG.debug("Splitting at " + splitAlgo.rowToStr(split)); LOG.debug("Splitting at " + splitAlgo.rowToStr(split));
HBaseAdmin admin = new HBaseAdmin(table.getConfiguration()); try (Admin admin = connection.getAdmin()) {
try { admin.split(tableName, split);
admin.split(table.getTableName(), split);
} finally {
admin.close();
} }
LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
@ -546,7 +574,7 @@ public class RegionSplitter {
// with too many outstanding splits, wait for some to finish // with too many outstanding splits, wait for some to finish
while (outstanding.size() >= MAX_OUTSTANDING) { while (outstanding.size() >= MAX_OUTSTANDING) {
LOG.debug("Wait for outstanding splits " + outstanding.size()); LOG.debug("Wait for outstanding splits " + outstanding.size());
local_finished = splitScan(outstanding, table, splitAlgo); local_finished = splitScan(outstanding, connection, tableName, splitAlgo);
if (local_finished.isEmpty()) { if (local_finished.isEmpty()) {
Thread.sleep(30 * 1000); Thread.sleep(30 * 1000);
} else { } else {
@ -578,7 +606,7 @@ public class RegionSplitter {
while (!outstanding.isEmpty()) { while (!outstanding.isEmpty()) {
LOG.debug("Finally Wait for outstanding splits " + outstanding.size()); LOG.debug("Finally Wait for outstanding splits " + outstanding.size());
LinkedList<Pair<byte[], byte[]>> finished = splitScan(outstanding, LinkedList<Pair<byte[], byte[]>> finished = splitScan(outstanding,
table, splitAlgo); connection, tableName, splitAlgo);
if (finished.isEmpty()) { if (finished.isEmpty()) {
Thread.sleep(30 * 1000); Thread.sleep(30 * 1000);
} else { } else {
@ -602,14 +630,14 @@ public class RegionSplitter {
LOG.debug("Avg Time / Split = " LOG.debug("Avg Time / Split = "
+ org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount)); + org.apache.hadoop.util.StringUtils.formatTime(tDiff / splitCount));
} }
}
} finally {
splitOut.close(); splitOut.close();
if (table != null){
table.close();
}
}
fs.delete(splitFile, false); fs.delete(splitFile, false);
} }
}
}
}
/** /**
* @throws IOException if the specified SplitAlgorithm class couldn't be * @throws IOException if the specified SplitAlgorithm class couldn't be
@ -647,21 +675,27 @@ public class RegionSplitter {
} }
static LinkedList<Pair<byte[], byte[]>> splitScan( static LinkedList<Pair<byte[], byte[]>> splitScan(
LinkedList<Pair<byte[], byte[]>> regionList, HTable table, LinkedList<Pair<byte[], byte[]>> regionList,
final Connection connection,
final TableName tableName,
SplitAlgorithm splitAlgo) SplitAlgorithm splitAlgo)
throws IOException, InterruptedException { throws IOException, InterruptedException {
LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> finished = Lists.newLinkedList();
LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> logicalSplitting = Lists.newLinkedList();
LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList(); LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
// get table info // Get table info
Path rootDir = FSUtils.getRootDir(table.getConfiguration()); Pair<Path, Path> tableDirAndSplitFile =
Path tableDir = FSUtils.getTableDir(rootDir, table.getName()); getTableDirAndSplitFile(connection.getConfiguration(), tableName);
FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); Path tableDir = tableDirAndSplitFile.getFirst();
HTableDescriptor htd = table.getTableDescriptor(); FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
// Clear the cache to forcibly refresh region information
// clear the cache to forcibly refresh region information ((ClusterConnection)connection).clearRegionCache();
table.clearRegionCache(); HTableDescriptor htd = null;
try (Table table = connection.getTable(tableName)) {
htd = table.getTableDescriptor();
}
try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
// for every region that hasn't been verified as a finished split // for every region that hasn't been verified as a finished split
for (Pair<byte[], byte[]> region : regionList) { for (Pair<byte[], byte[]> region : regionList) {
@ -670,7 +704,7 @@ public class RegionSplitter {
// see if the new split daughter region has come online // see if the new split daughter region has come online
try { try {
HRegionInfo dri = table.getRegionLocation(split).getRegionInfo(); HRegionInfo dri = regionLocator.getRegionLocation(split).getRegionInfo();
if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) { if (dri.isOffline() || !Bytes.equals(dri.getStartKey(), split)) {
logicalSplitting.add(region); logicalSplitting.add(region);
continue; continue;
@ -686,21 +720,20 @@ public class RegionSplitter {
// when a daughter region is opened, a compaction is triggered // when a daughter region is opened, a compaction is triggered
// wait until compaction completes for both daughter regions // wait until compaction completes for both daughter regions
LinkedList<HRegionInfo> check = Lists.newLinkedList(); LinkedList<HRegionInfo> check = Lists.newLinkedList();
check.add(table.getRegionLocation(start).getRegionInfo()); check.add(regionLocator.getRegionLocation(start).getRegionInfo());
check.add(table.getRegionLocation(split).getRegionInfo()); check.add(regionLocator.getRegionLocation(split).getRegionInfo());
for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) { for (HRegionInfo hri : check.toArray(new HRegionInfo[check.size()])) {
byte[] sk = hri.getStartKey(); byte[] sk = hri.getStartKey();
if (sk.length == 0) if (sk.length == 0)
sk = splitAlgo.firstRow(); sk = splitAlgo.firstRow();
String startKey = splitAlgo.rowToStr(sk);
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem( HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
table.getConfiguration(), fs, tableDir, hri, true); connection.getConfiguration(), fs, tableDir, hri, true);
// check every Column Family for that region // Check every Column Family for that region -- check does not have references.
boolean refFound = false; boolean refFound = false;
for (HColumnDescriptor c : htd.getFamilies()) { for (HColumnDescriptor c : htd.getFamilies()) {
if ((refFound = regionFs.hasReferences(htd.getTableName().getNameAsString()))) { if ((refFound = regionFs.hasReferences(c.getNameAsString()))) {
break; break;
} }
} }
@ -718,7 +751,7 @@ public class RegionSplitter {
} catch (NoServerForRegionException nsfre) { } catch (NoServerForRegionException nsfre) {
LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start)); LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
physicalSplitting.add(region); physicalSplitting.add(region);
table.clearRegionCache(); ((ClusterConnection)connection).clearRegionCache();
} }
} }
@ -728,27 +761,48 @@ public class RegionSplitter {
return finished; return finished;
} }
}
static LinkedList<Pair<byte[], byte[]>> getSplits(HTable table, /**
SplitAlgorithm splitAlgo) throws IOException { * @param conf
Path hbDir = FSUtils.getRootDir(table.getConfiguration()); * @param tableName
Path tableDir = FSUtils.getTableDir(hbDir, table.getName()); * @return A Pair where first item is table dir and second is the split file.
* @throws IOException
*/
private static Pair<Path, Path> getTableDirAndSplitFile(final Configuration conf,
final TableName tableName)
throws IOException {
Path hbDir = FSUtils.getRootDir(conf);
Path tableDir = FSUtils.getTableDir(hbDir, tableName);
Path splitFile = new Path(tableDir, "_balancedSplit"); Path splitFile = new Path(tableDir, "_balancedSplit");
FileSystem fs = tableDir.getFileSystem(table.getConfiguration()); return new Pair<Path, Path>(tableDir, splitFile);
}
// using strings because (new byte[]{0}).equals(new byte[]{0}) == false static LinkedList<Pair<byte[], byte[]>> getSplits(final Connection connection,
TableName tableName, SplitAlgorithm splitAlgo)
throws IOException {
Pair<Path, Path> tableDirAndSplitFile =
getTableDirAndSplitFile(connection.getConfiguration(), tableName);
Path tableDir = tableDirAndSplitFile.getFirst();
Path splitFile = tableDirAndSplitFile.getSecond();
FileSystem fs = tableDir.getFileSystem(connection.getConfiguration());
// Using strings because (new byte[]{0}).equals(new byte[]{0}) == false
Set<Pair<String, String>> daughterRegions = Sets.newHashSet(); Set<Pair<String, String>> daughterRegions = Sets.newHashSet();
// does a split file exist? // Does a split file exist?
if (!fs.exists(splitFile)) { if (!fs.exists(splitFile)) {
// NO = fresh start. calculate splits to make // NO = fresh start. calculate splits to make
LOG.debug("No _balancedSplit file. Calculating splits..."); LOG.debug("No " + splitFile.getName() + " file. Calculating splits ");
// query meta for all regions in the table // Query meta for all regions in the table
Set<Pair<byte[], byte[]>> rows = Sets.newHashSet(); Set<Pair<byte[], byte[]>> rows = Sets.newHashSet();
Pair<byte[][], byte[][]> tmp = table.getStartEndKeys(); Pair<byte[][], byte[][]> tmp = null;
Preconditions.checkArgument( try (RegionLocator regionLocator = connection.getRegionLocator(tableName)) {
tmp.getFirst().length == tmp.getSecond().length, tmp = regionLocator.getStartEndKeys();
}
Preconditions.checkArgument(tmp.getFirst().length == tmp.getSecond().length,
"Start and End rows should be equivalent"); "Start and End rows should be equivalent");
for (int i = 0; i < tmp.getFirst().length; ++i) { for (int i = 0; i < tmp.getFirst().length; ++i) {
byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i]; byte[] start = tmp.getFirst()[i], end = tmp.getSecond()[i];
@ -758,8 +812,7 @@ public class RegionSplitter {
end = splitAlgo.lastRow(); end = splitAlgo.lastRow();
rows.add(Pair.newPair(start, end)); rows.add(Pair.newPair(start, end));
} }
LOG.debug("Table " + Bytes.toString(table.getTableName()) + " has " LOG.debug("Table " + tableName + " has " + rows.size() + " regions that will be split.");
+ rows.size() + " regions that will be split.");
// prepare the split file // prepare the split file
Path tmpFile = new Path(tableDir, "_balancedSplit_prepare"); Path tmpFile = new Path(tableDir, "_balancedSplit_prepare");
@ -780,8 +833,8 @@ public class RegionSplitter {
fs.rename(tmpFile, splitFile); fs.rename(tmpFile, splitFile);
} else { } else {
LOG.debug("_balancedSplit file found. Replay log to restore state..."); LOG.debug("_balancedSplit file found. Replay log to restore state...");
FSUtils.getInstance(fs, table.getConfiguration()) FSUtils.getInstance(fs, connection.getConfiguration())
.recoverFileLease(fs, splitFile, table.getConfiguration(), null); .recoverFileLease(fs, splitFile, connection.getConfiguration(), null);
// parse split file and process remaining splits // parse split file and process remaining splits
FSDataInputStream tmpIn = fs.open(splitFile); FSDataInputStream tmpIn = fs.open(splitFile);
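For reference, a minimal sketch of the Connection/RegionLocator pattern the reworked getSplits(...) now relies on; the configuration and table name here are illustrative, not taken from the patch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.RegionLocator;
  import org.apache.hadoop.hbase.util.Pair;

  public class StartEndKeysSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName tableName = TableName.valueOf("exampleTable"); // hypothetical table
      // try-with-resources closes both the Connection and the RegionLocator; no explicit finally.
      try (Connection connection = ConnectionFactory.createConnection(conf);
           RegionLocator locator = connection.getRegionLocator(tableName)) {
        Pair<byte[][], byte[][]> keys = locator.getStartEndKeys();
        System.out.println("Regions in " + tableName + ": " + keys.getFirst().length);
      }
    }
  }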
View File
@ -307,7 +307,7 @@ public class WALSplitter {
return true; return true;
} }
if(csm != null) { if(csm != null) {
HConnection scc = csm.getServer().getShortCircuitConnection(); HConnection scc = csm.getServer().getConnection();
TableName[] tables = scc.listTableNames(); TableName[] tables = scc.listTableNames();
for (TableName table : tables) { for (TableName table : tables) {
if (scc.getTableState(table) if (scc.getTableState(table)
View File
@ -152,11 +152,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
private boolean passedZkCluster = false; private boolean passedZkCluster = false;
private MiniDFSCluster dfsCluster = null; private MiniDFSCluster dfsCluster = null;
private HBaseCluster hbaseCluster = null; private volatile HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null; private MiniMRCluster mrCluster = null;
/** If there is a mini cluster running for this testing utility instance. */ /** If there is a mini cluster running for this testing utility instance. */
private boolean miniClusterRunning; private volatile boolean miniClusterRunning;
private String hadoopLogDir; private String hadoopLogDir;
@ -167,6 +167,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* HBaseTestingUtility*/ * HBaseTestingUtility*/
private Path dataTestDirOnTestFS = null; private Path dataTestDirOnTestFS = null;
/**
* Shared cluster connection.
*/
private volatile Connection connection;
/** /**
* System property key to get test directory value. * System property key to get test directory value.
* Name is as it is because mini dfs has hard-codings to put test data here. * Name is as it is because mini dfs has hard-codings to put test data here.
@ -965,6 +970,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
*/ */
public void shutdownMiniCluster() throws Exception { public void shutdownMiniCluster() throws Exception {
LOG.info("Shutting down minicluster"); LOG.info("Shutting down minicluster");
if (this.connection != null && !this.connection.isClosed()) {
this.connection.close();
this.connection = null;
}
shutdownMiniHBaseCluster(); shutdownMiniHBaseCluster();
if (!this.passedZkCluster){ if (!this.passedZkCluster){
shutdownMiniZKCluster(); shutdownMiniZKCluster();
@ -1083,7 +1092,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return An HTable instance for the created table. * @return An HTable instance for the created table.
* @throws IOException * @throws IOException
*/ */
public HTable createTable(TableName tableName, String family) public Table createTable(TableName tableName, String family)
throws IOException{ throws IOException{
return createTable(tableName, new String[]{family}); return createTable(tableName, new String[]{family});
} }
@ -1107,7 +1116,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @return An HTable instance for the created table. * @return An HTable instance for the created table.
* @throws IOException * @throws IOException
*/ */
public HTable createTable(TableName tableName, String[] families) public Table createTable(TableName tableName, String[] families)
throws IOException { throws IOException {
List<byte[]> fams = new ArrayList<byte[]>(families.length); List<byte[]> fams = new ArrayList<byte[]>(families.length);
for (String family : families) { for (String family : families) {
@ -1146,13 +1155,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* Create a table. * Create a table.
* @param tableName * @param tableName
* @param families * @param families
* @return An HTable instance for the created table. * @return An HTable instance for the created table.
* @throws IOException * @throws IOException
*/ */
public HTable createTable(TableName tableName, byte[][] families) public HTable createTable(TableName tableName, byte[][] families)
throws IOException { throws IOException {
return createTable(tableName, families, return createTable(tableName, families, new Configuration(getConfiguration()));
new Configuration(getConfiguration()));
} }
public HTable createTable(byte[] tableName, byte[][] families, public HTable createTable(byte[] tableName, byte[][] families,
@ -1203,7 +1212,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
getHBaseAdmin().createTable(htd); getHBaseAdmin().createTable(htd);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned // HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
waitUntilAllRegionsAssigned(htd.getTableName()); waitUntilAllRegionsAssigned(htd.getTableName());
return new HTable(c, htd.getTableName()); return (HTable)getConnection().getTable(htd.getTableName());
} }
/** /**
@ -2565,6 +2574,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return hbaseCluster; return hbaseCluster;
} }
/**
* Get a Connection to the cluster.
* Not thread-safe (This class needs a lot of work to make it thread-safe).
* @return A Connection that can be shared. Don't close. Will be closed on shutdown of cluster.
* @throws IOException
*/
public Connection getConnection() throws IOException {
if (this.connection == null) {
if (getMiniHBaseCluster() == null) {
throw new IllegalStateException("You cannot have a Connection if cluster is not up");
}
this.connection = ConnectionFactory.createConnection(this.conf);
}
return this.connection;
}
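A minimal sketch of how test code is expected to use this shared Connection instead of constructing HTable directly; the table and family names are illustrative and assume a running mini cluster:

  import org.apache.hadoop.hbase.HBaseTestingUtility;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Get;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class SharedConnectionSketch {
    public static void main(String[] args) throws Exception {
      HBaseTestingUtility util = new HBaseTestingUtility();
      util.startMiniCluster();
      TableName tn = TableName.valueOf("exampleTable"); // illustrative name
      util.createTable(tn, "f");
      // Old style: Table t = new HTable(util.getConfiguration(), tn);
      // New style: borrow a Table from the shared Connection; close the Table, never the Connection.
      try (Table t = util.getConnection().getTable(tn)) {
        Result r = t.get(new Get(Bytes.toBytes("row1")));
        System.out.println("Row exists: " + !r.isEmpty());
      }
      util.shutdownMiniCluster(); // also closes the shared Connection
    }
  }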
/** /**
* Returns a Admin instance. * Returns a Admin instance.
* This instance is shared between HBaseTestingUtility instance users. * This instance is shared between HBaseTestingUtility instance users.
@ -2577,21 +2602,22 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
public synchronized HBaseAdmin getHBaseAdmin() public synchronized HBaseAdmin getHBaseAdmin()
throws IOException { throws IOException {
if (hbaseAdmin == null){ if (hbaseAdmin == null){
hbaseAdmin = new HBaseAdminForTests(getConfiguration()); this.hbaseAdmin = new HBaseAdminForTests(getConnection());
} }
return hbaseAdmin; return hbaseAdmin;
} }
private HBaseAdminForTests hbaseAdmin = null; private HBaseAdminForTests hbaseAdmin = null;
private static class HBaseAdminForTests extends HBaseAdmin { private static class HBaseAdminForTests extends HBaseAdmin {
public HBaseAdminForTests(Configuration c) throws MasterNotRunningException, public HBaseAdminForTests(Connection connection) throws MasterNotRunningException,
ZooKeeperConnectionException, IOException { ZooKeeperConnectionException, IOException {
super(c); super(connection);
} }
@Override @Override
public synchronized void close() throws IOException { public synchronized void close() throws IOException {
LOG.warn("close() called on HBaseAdmin instance returned from HBaseTestingUtility.getHBaseAdmin()"); LOG.warn("close() called on HBaseAdmin instance returned from " +
"HBaseTestingUtility.getHBaseAdmin()");
} }
private synchronized void close0() throws IOException { private synchronized void close0() throws IOException {
View File
@ -47,7 +47,7 @@ public class MetaMockingUtil {
* Returns a Result object constructed from the given region information simulating * Returns a Result object constructed from the given region information simulating
* a catalog table result. * a catalog table result.
* @param region the HRegionInfo object or null * @param region the HRegionInfo object or null
* @param ServerName to use making startcode and server hostname:port in meta or null * @param sn to use making startcode and server hostname:port in meta or null
* @return A mocked up Result that fakes a Get on a row in the <code>hbase:meta</code> table. * @return A mocked up Result that fakes a Get on a row in the <code>hbase:meta</code> table.
* @throws IOException * @throws IOException
*/ */
@ -60,7 +60,7 @@ public class MetaMockingUtil {
* Returns a Result object constructed from the given region information simulating * Returns a Result object constructed from the given region information simulating
* a catalog table result. * a catalog table result.
* @param region the HRegionInfo object or null * @param region the HRegionInfo object or null
* @param ServerName to use making startcode and server hostname:port in meta or null * @param sn to use making startcode and server hostname:port in meta or null
* @param splita daughter region or null * @param splita daughter region or null
* @param splitb daughter region or null * @param splitb daughter region or null
* @return A mocked up Result that fakes a Get on a row in the <code>hbase:meta</code> table. * @return A mocked up Result that fakes a Get on a row in the <code>hbase:meta</code> table.
View File
@ -27,7 +27,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.ipc.RpcServerInterface;
@ -138,7 +138,7 @@ class MockRegionServerServices implements RegionServerServices {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
View File
@ -274,7 +274,7 @@ public class TestAcidGuarantees implements Tool {
} }
// Add a flusher // Add a flusher
ctx.addThread(new RepeatingTestThread(ctx) { ctx.addThread(new RepeatingTestThread(ctx) {
HBaseAdmin admin = new HBaseAdmin(util.getConfiguration()); HBaseAdmin admin = util.getHBaseAdmin();
public void doAnAction() throws Exception { public void doAnAction() throws Exception {
try { try {
admin.flush(TABLE_NAME); admin.flush(TABLE_NAME);
View File
@ -28,12 +28,12 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@ -50,11 +50,6 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.log4j.Level;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
@ -259,13 +254,13 @@ public class TestIOFencing {
LOG.info("Starting mini cluster"); LOG.info("Starting mini cluster");
TEST_UTIL.startMiniCluster(1); TEST_UTIL.startMiniCluster(1);
CompactionBlockerRegion compactingRegion = null; CompactionBlockerRegion compactingRegion = null;
HBaseAdmin admin = null; Admin admin = null;
try { try {
LOG.info("Creating admin"); LOG.info("Creating admin");
admin = new HBaseAdmin(c); admin = TEST_UTIL.getConnection().getAdmin();
LOG.info("Creating table"); LOG.info("Creating table");
TEST_UTIL.createTable(TABLE_NAME, FAMILY); TEST_UTIL.createTable(TABLE_NAME, FAMILY);
HTable table = new HTable(c, TABLE_NAME); Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
LOG.info("Loading test table"); LOG.info("Loading test table");
// Find the region // Find the region
List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME); List<HRegion> testRegions = TEST_UTIL.getMiniHBaseCluster().findRegionsForTable(TABLE_NAME);
@ -299,7 +294,7 @@ public class TestIOFencing {
assertTrue(compactingRegion.countStoreFiles() > 1); assertTrue(compactingRegion.countStoreFiles() > 1);
final byte REGION_NAME[] = compactingRegion.getRegionName(); final byte REGION_NAME[] = compactingRegion.getRegionName();
LOG.info("Asking for compaction"); LOG.info("Asking for compaction");
admin.majorCompact(TABLE_NAME.getName()); ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName());
LOG.info("Waiting for compaction to be about to start"); LOG.info("Waiting for compaction to be about to start");
compactingRegion.waitForCompactionToBlock(); compactingRegion.waitForCompactionToBlock();
LOG.info("Starting a new server"); LOG.info("Starting a new server");
@ -339,7 +334,7 @@ public class TestIOFencing {
// If we survive the split keep going... // If we survive the split keep going...
// Now we make sure that the region isn't totally confused. Load up more rows. // Now we make sure that the region isn't totally confused. Load up more rows.
TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT); TEST_UTIL.loadNumericRows(table, FAMILY, FIRST_BATCH_COUNT, FIRST_BATCH_COUNT + SECOND_BATCH_COUNT);
admin.majorCompact(TABLE_NAME.getName()); ((HBaseAdmin)admin).majorCompact(TABLE_NAME.getName());
startWaitTime = System.currentTimeMillis(); startWaitTime = System.currentTimeMillis();
while (newRegion.compactCount == 0) { while (newRegion.compactCount == 0) {
Thread.sleep(1000); Thread.sleep(1000);
View File
@ -35,14 +35,16 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@ -73,6 +75,7 @@ public class TestZooKeeperTableArchiveClient {
private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME); private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
private static ZKTableArchiveClient archivingClient; private static ZKTableArchiveClient archivingClient;
private final List<Path> toCleanup = new ArrayList<Path>(); private final List<Path> toCleanup = new ArrayList<Path>();
private static ClusterConnection CONNECTION;
/** /**
* Setup the config for the cluster * Setup the config for the cluster
@ -81,8 +84,8 @@ public class TestZooKeeperTableArchiveClient {
public static void setupCluster() throws Exception { public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration()); setupConf(UTIL.getConfiguration());
UTIL.startMiniZKCluster(); UTIL.startMiniZKCluster();
archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), UTIL.getHBaseAdmin() CONNECTION = (ClusterConnection)ConnectionFactory.createConnection(UTIL.getConfiguration());
.getConnection()); archivingClient = new ZKTableArchiveClient(UTIL.getConfiguration(), CONNECTION);
// make hfile archiving node so we can archive files // make hfile archiving node so we can archive files
ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher(); ZooKeeperWatcher watcher = UTIL.getZooKeeperWatcher();
String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher); String archivingZNode = ZKTableArchiveClient.getArchiveZNode(UTIL.getConfiguration(), watcher);
@ -115,6 +118,7 @@ public class TestZooKeeperTableArchiveClient {
@AfterClass @AfterClass
public static void cleanupTest() throws Exception { public static void cleanupTest() throws Exception {
try { try {
CONNECTION.close();
UTIL.shutdownMiniZKCluster(); UTIL.shutdownMiniZKCluster();
} catch (Exception e) { } catch (Exception e) {
LOG.warn("problem shutting down cluster", e); LOG.warn("problem shutting down cluster", e);
View File
@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.mockito.Mockito; import org.mockito.Mockito;
/** /**
* {@link HConnection} testing utility. * {@link ClusterConnection} testing utility.
*/ */
public class HConnectionTestingUtility { public class HConnectionTestingUtility {
/* /*
@ -44,7 +44,7 @@ public class HConnectionTestingUtility {
/** /**
* Get a Mocked {@link HConnection} that goes with the passed <code>conf</code> * Get a Mocked {@link HConnection} that goes with the passed <code>conf</code>
* configuration instance. Minimally the mock will return * configuration instance. Minimally the mock will return
* <code>conf</conf> when {@link HConnection#getConfiguration()} is invoked. * <code>conf</conf> when {@link ClusterConnection#getConfiguration()} is invoked.
* Be sure to shutdown the connection when done by calling * Be sure to shutdown the connection when done by calling
* {@link HConnectionManager#deleteConnection(Configuration)} else it * {@link HConnectionManager#deleteConnection(Configuration)} else it
* will stick around; this is probably not what you want. * will stick around; this is probably not what you want.
@ -69,7 +69,7 @@ public class HConnectionTestingUtility {
/** /**
* Calls {@link #getMockedConnection(Configuration)} and then mocks a few * Calls {@link #getMockedConnection(Configuration)} and then mocks a few
* more of the popular {@link HConnection} methods so they do 'normal' * more of the popular {@link ClusterConnection} methods so they do 'normal'
* operation (see return doc below for list). Be sure to shutdown the * operation (see return doc below for list). Be sure to shutdown the
* connection when done by calling * connection when done by calling
* {@link HConnectionManager#deleteConnection(Configuration)} else it * {@link HConnectionManager#deleteConnection(Configuration)} else it
@ -85,12 +85,13 @@ public class HConnectionTestingUtility {
* @param hri HRegionInfo to include in the location returned when * @param hri HRegionInfo to include in the location returned when
* getRegionLocator is called on the mocked connection * getRegionLocator is called on the mocked connection
* @return Mock up a connection that returns a {@link Configuration} when * @return Mock up a connection that returns a {@link Configuration} when
* {@link HConnection#getConfiguration()} is called, a 'location' when * {@link ClusterConnection#getConfiguration()} is called, a 'location' when
* {@link HConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)} is called, * {@link ClusterConnection#getRegionLocation(org.apache.hadoop.hbase.TableName, byte[], boolean)}
* is called,
* and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
* {@link HConnection#getAdmin(ServerName)} is called, returns the passed * {@link ClusterConnection#getAdmin(ServerName)} is called, returns the passed
* {@link ClientProtos.ClientService.BlockingInterface} instance when * {@link ClientProtos.ClientService.BlockingInterface} instance when
* {@link HConnection#getClient(ServerName)} is called (Be sure to call * {@link ClusterConnection#getClient(ServerName)} is called (Be sure to call
* {@link HConnectionManager#deleteConnection(Configuration)} * {@link HConnectionManager#deleteConnection(Configuration)}
* when done with this mocked Connection. * when done with this mocked Connection.
* @throws IOException * @throws IOException
@ -134,11 +135,13 @@ public class HConnectionTestingUtility {
Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn( Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
RpcRetryingCallerFactory.instantiate(conf, RpcRetryingCallerFactory.instantiate(conf,
RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR)); RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR));
HTableInterface t = Mockito.mock(HTableInterface.class);
Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
return c; return c;
} }
/** /**
* Get a Mockito spied-upon {@link HConnection} that goes with the passed * Get a Mockito spied-upon {@link ClusterConnection} that goes with the passed
* <code>conf</code> configuration instance. * <code>conf</code> configuration instance.
* Be sure to shutdown the connection when done by calling * Be sure to shutdown the connection when done by calling
* {@link HConnectionManager#deleteConnection(Configuration)} else it * {@link HConnectionManager#deleteConnection(Configuration)} else it
@ -149,7 +152,7 @@ public class HConnectionTestingUtility {
* @see @link * @see @link
* {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)} * {http://mockito.googlecode.com/svn/branches/1.6/javadoc/org/mockito/Mockito.html#spy(T)}
*/ */
public static HConnection getSpiedConnection(final Configuration conf) public static ClusterConnection getSpiedConnection(final Configuration conf)
throws IOException { throws IOException {
HConnectionKey connectionKey = new HConnectionKey(conf); HConnectionKey connectionKey = new HConnectionKey(conf);
synchronized (ConnectionManager.CONNECTION_INSTANCES) { synchronized (ConnectionManager.CONNECTION_INSTANCES) {
View File
@ -111,13 +111,13 @@ public class TestFromClientSide3 {
} }
private void performMultiplePutAndFlush(HBaseAdmin admin, HTable table, private void performMultiplePutAndFlush(HBaseAdmin admin, HTable table,
byte[] row, byte[] family, int nFlushes, int nPuts) throws Exception { byte[] row, byte[] family, int nFlushes, int nPuts)
throws Exception {
// connection needed for poll-wait // connection needed for poll-wait
HConnection conn = HConnectionManager.getConnection(TEST_UTIL
.getConfiguration());
HRegionLocation loc = table.getRegionLocation(row, true); HRegionLocation loc = table.getRegionLocation(row, true);
AdminProtos.AdminService.BlockingInterface server = conn.getAdmin(loc.getServerName()); AdminProtos.AdminService.BlockingInterface server =
admin.getConnection().getAdmin(loc.getServerName());
byte[] regName = loc.getRegionInfo().getRegionName(); byte[] regName = loc.getRegionInfo().getRegionName();
for (int i = 0; i < nFlushes; i++) { for (int i = 0; i < nFlushes; i++) {
@ -151,12 +151,10 @@ public class TestFromClientSide3 {
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3); TEST_UTIL.getConfiguration().setInt("hbase.hstore.compaction.min", 3);
String tableName = "testAdvancedConfigOverride"; String tableName = "testAdvancedConfigOverride";
TableName TABLE = TableName TABLE = TableName.valueOf(tableName);
TableName.valueOf(tableName);
HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10); HTable hTable = TEST_UTIL.createTable(TABLE, FAMILY, 10);
HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
HConnection connection = HConnectionManager.getConnection(TEST_UTIL ClusterConnection connection = (ClusterConnection)TEST_UTIL.getConnection();
.getConfiguration());
// Create 3 store files. // Create 3 store files.
byte[] row = Bytes.toBytes(random.nextInt()); byte[] row = Bytes.toBytes(random.nextInt());

View File
String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest"; String testName = TestRegionObserverInterface.class.getName()+".bulkLoadHFileTest";
TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest"); TableName tableName = TableName.valueOf(TEST_TABLE.getNameAsString() + ".bulkLoadHFileTest");
Configuration conf = util.getConfiguration(); Configuration conf = util.getConfiguration();
Table table = util.createTable(tableName, new byte[][] {A, B, C}); HTable table = util.createTable(tableName, new byte[][] {A, B, C});
try { try {
verifyMethodResult(SimpleRegionObserver.class, verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"}, new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
@ -578,7 +578,7 @@ public class TestRegionObserverInterface {
createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A); createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A);
// Bulk load // Bulk load
new LoadIncrementalHFiles(conf).doBulkLoad(dir, new HTable(conf, tableName)); new LoadIncrementalHFiles(conf).doBulkLoad(dir, table);
verifyMethodResult(SimpleRegionObserver.class, verifyMethodResult(SimpleRegionObserver.class,
new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"}, new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
View File
@ -154,7 +154,7 @@ public class TestRegionServerObserver {
mergedRegion = rmt.stepsBeforePONR(rs, rs, false); mergedRegion = rmt.stepsBeforePONR(rs, rs, false);
rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(), rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(),
regionB.getRegionInfo(), rs.getServerName(), metaEntries); regionB.getRegionInfo(), rs.getServerName(), metaEntries);
MetaTableAccessor.mutateMetaTable(rs.getShortCircuitConnection(), metaEntries); MetaTableAccessor.mutateMetaTable(rs.getConnection(), metaEntries);
} }
@Override @Override
View File
@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -47,9 +48,7 @@ import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
@ -71,7 +70,6 @@ public class TestChangingEncoding {
private static final int TIMEOUT_MS = 600000; private static final int TIMEOUT_MS = 600000;
private HBaseAdmin admin;
private HColumnDescriptor hcd; private HColumnDescriptor hcd;
private TableName tableName; private TableName tableName;
@ -93,7 +91,9 @@ public class TestChangingEncoding {
HTableDescriptor htd = new HTableDescriptor(tableName); HTableDescriptor htd = new HTableDescriptor(tableName);
hcd = new HColumnDescriptor(CF); hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd); htd.addFamily(hcd);
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
admin.createTable(htd); admin.createTable(htd);
}
numBatchesWritten = 0; numBatchesWritten = 0;
} }
@ -112,16 +112,6 @@ public class TestChangingEncoding {
TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.shutdownMiniCluster();
} }
@Before
public void setUp() throws Exception {
admin = new HBaseAdmin(conf);
}
@After
public void tearDown() throws IOException {
admin.close();
}
private static byte[] getRowKey(int batchId, int i) { private static byte[] getRowKey(int batchId, int i) {
return Bytes.toBytes("batch" + batchId + "_row" + i); return Bytes.toBytes("batch" + batchId + "_row" + i);
} }
@ -184,6 +174,7 @@ public class TestChangingEncoding {
LOG.debug("Setting CF encoding to " + encoding + " (ordinal=" LOG.debug("Setting CF encoding to " + encoding + " (ordinal="
+ encoding.ordinal() + "), onlineChange=" + onlineChange); + encoding.ordinal() + "), onlineChange=" + onlineChange);
hcd.setDataBlockEncoding(encoding); hcd.setDataBlockEncoding(encoding);
try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
if (!onlineChange) { if (!onlineChange) {
admin.disableTable(tableName); admin.disableTable(tableName);
} }
@ -191,6 +182,7 @@ public class TestChangingEncoding {
if (!onlineChange) { if (!onlineChange) {
admin.enableTable(tableName); admin.enableTable(tableName);
} }
}
// This is a unit test, not integration test. So let's // This is a unit test, not integration test. So let's
// wait for regions out of transition. Otherwise, for online // wait for regions out of transition. Otherwise, for online
// encoding change, verification phase may be flaky because // encoding change, verification phase may be flaky because
@ -227,6 +219,7 @@ public class TestChangingEncoding {
private void compactAndWait() throws IOException, InterruptedException { private void compactAndWait() throws IOException, InterruptedException {
LOG.debug("Compacting table " + tableName); LOG.debug("Compacting table " + tableName);
HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0); HRegionServer rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
admin.majorCompact(tableName); admin.majorCompact(tableName);
// Waiting for the compaction to start, at least .5s. // Waiting for the compaction to start, at least .5s.
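Admin is Closeable, so the try-with-resources form used above removes the need for an explicit close() in a tearDown method. A minimal sketch of the same idiom; TEST_UTIL stands in for the test's HBaseTestingUtility and the table and family names are illustrative:

  try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("encodedTable")); // illustrative
    htd.addFamily(new HColumnDescriptor("cf"));
    admin.createTable(htd);
  } // admin.close() runs automatically here, even on exception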
View File
@ -136,7 +136,6 @@ public class TestTableMapReduceUtil {
* does not exceed the number of regions for the given table. * does not exceed the number of regions for the given table.
*/ */
@Test @Test
@SuppressWarnings("deprecation")
public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable()
throws IOException { throws IOException {
Assert.assertNotNull(presidentsTable); Assert.assertNotNull(presidentsTable);
@ -155,7 +154,6 @@ public class TestTableMapReduceUtil {
} }
@Test @Test
@SuppressWarnings("deprecation")
public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable()
throws IOException { throws IOException {
Configuration cfg = UTIL.getConfiguration(); Configuration cfg = UTIL.getConfiguration();
View File
@ -23,8 +23,8 @@ import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set;
import java.util.Random; import java.util.Random;
import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ConcurrentSkipListMap;
@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService;
@ -113,7 +113,7 @@ import com.google.protobuf.ServiceException;
* Use this when you can't bend Mockito to your liking (e.g. return null result * Use this when you can't bend Mockito to your liking (e.g. return null result
* when 'scanning' until master timesout and then return a coherent meta row * when 'scanning' until master timesout and then return a coherent meta row
* result thereafter. Have some facility for faking gets and scans. See * result thereafter. Have some facility for faking gets and scans. See
* {@link #setGetResult(byte[], byte[], Result)} for how to fill the backing data * setGetResult(byte[], byte[], Result) for how to fill the backing data
* store that the get pulls from. * store that the get pulls from.
*/ */
class MockRegionServer class MockRegionServer
@ -283,7 +283,7 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
View File
@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -304,7 +304,7 @@ public class TestActiveMasterManager {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -322,4 +322,3 @@ public class TestActiveMasterManager {
} }
} }
} }
View File
@ -38,25 +38,28 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaMockingUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.MetaMockingUtil; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination;
import org.apache.hadoop.hbase.coordination.SplitLogManagerCoordination.SplitLogManagerDetails;
import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator;
@ -99,7 +102,7 @@ public class TestCatalogJanitor {
* Be sure to call stop on the way out else could leave some mess around. * Be sure to call stop on the way out else could leave some mess around.
*/ */
class MockServer implements Server { class MockServer implements Server {
private final HConnection connection; private final ClusterConnection connection;
private final Configuration c; private final Configuration c;
MockServer(final HBaseTestingUtility htu) MockServer(final HBaseTestingUtility htu)
@ -145,7 +148,7 @@ public class TestCatalogJanitor {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return this.connection; return this.connection;
} }
@ -171,7 +174,12 @@ public class TestCatalogJanitor {
@Override @Override
public CoordinatedStateManager getCoordinatedStateManager() { public CoordinatedStateManager getCoordinatedStateManager() {
return null; BaseCoordinatedStateManager m = Mockito.mock(BaseCoordinatedStateManager.class);
SplitLogManagerCoordination c = Mockito.mock(SplitLogManagerCoordination.class);
Mockito.when(m.getSplitLogManagerCoordination()).thenReturn(c);
SplitLogManagerDetails d = Mockito.mock(SplitLogManagerDetails.class);
Mockito.when(c.getDetails()).thenReturn(d);
return m;
} }
@Override @Override
@ -266,7 +274,7 @@ public class TestCatalogJanitor {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -885,6 +893,7 @@ public class TestCatalogJanitor {
MasterServices services = new MockMasterServices(server); MasterServices services = new MockMasterServices(server);
// create the janitor // create the janitor
CatalogJanitor janitor = new CatalogJanitor(server, services); CatalogJanitor janitor = new CatalogJanitor(server, services);
// Create regions. // Create regions.
View File
@ -25,9 +25,12 @@ import java.net.InetAddress;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@ -45,7 +48,7 @@ public class TestClockSkewDetection {
final Configuration conf = HBaseConfiguration.create(); final Configuration conf = HBaseConfiguration.create();
ServerManager sm = new ServerManager(new Server() { ServerManager sm = new ServerManager(new Server() {
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -89,7 +92,8 @@ public class TestClockSkewDetection {
@Override @Override
public void stop(String why) { public void stop(String why) {
}}, null, false); }
}, null, false);
LOG.debug("regionServerStartup 1"); LOG.debug("regionServerStartup 1");
InetAddress ia1 = InetAddress.getLocalHost(); InetAddress ia1 = InetAddress.getLocalHost();
View File
@ -87,7 +87,7 @@ public class TestMaster {
ht.close(); ht.close();
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor.getTableRegionsAndLocations( List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
m.getShortCircuitConnection(), TABLENAME); m.getConnection(), TABLENAME);
LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
assertEquals(1, tableRegions.size()); assertEquals(1, tableRegions.size());
assertArrayEquals(HConstants.EMPTY_START_ROW, assertArrayEquals(HConstants.EMPTY_START_ROW,
@ -104,7 +104,7 @@ public class TestMaster {
Thread.sleep(100); Thread.sleep(100);
} }
LOG.info("Making sure we can call getTableRegions while opening"); LOG.info("Making sure we can call getTableRegions while opening");
tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getShortCircuitConnection(), tableRegions = MetaTableAccessor.getTableRegionsAndLocations(m.getConnection(),
TABLENAME, false); TABLENAME, false);
LOG.info("Regions: " + Joiner.on(',').join(tableRegions)); LOG.info("Regions: " + Joiner.on(',').join(tableRegions));
@ -115,7 +115,7 @@ public class TestMaster {
m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde")); m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
LOG.info("Result is: " + pair); LOG.info("Result is: " + pair);
Pair<HRegionInfo, ServerName> tableRegionFromName = Pair<HRegionInfo, ServerName> tableRegionFromName =
MetaTableAccessor.getRegion(m.getShortCircuitConnection(), MetaTableAccessor.getRegion(m.getConnection(),
pair.getFirst().getRegionName()); pair.getFirst().getRegionName());
assertEquals(tableRegionFromName.getFirst(), pair.getFirst()); assertEquals(tableRegionFromName.getFirst(), pair.getFirst());
} }
View File
@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.protobuf.RequestConverter; import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
@ -218,8 +219,8 @@ public class TestMasterFailover {
assertTrue(master.isInitialized()); assertTrue(master.isInitialized());
// Create a table with a region online // Create a table with a region online
RegionLocator onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family"); Table onlineTable = TEST_UTIL.createTable(TableName.valueOf("onlineTable"), "family");
onlineTable.close();
// Create a table in META, so it has a region offline // Create a table in META, so it has a region offline
HTableDescriptor offlineTable = new HTableDescriptor( HTableDescriptor offlineTable = new HTableDescriptor(
TableName.valueOf(Bytes.toBytes("offlineTable"))); TableName.valueOf(Bytes.toBytes("offlineTable")));
@ -232,16 +233,18 @@ public class TestMasterFailover {
HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null); HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
createRegion(hriOffline, rootdir, conf, offlineTable); createRegion(hriOffline, rootdir, conf, offlineTable);
MetaTableAccessor.addRegionToMeta(master.getShortCircuitConnection(), hriOffline); MetaTableAccessor.addRegionToMeta(master.getConnection(), hriOffline);
log("Regions in hbase:meta and namespace have been created"); log("Regions in hbase:meta and namespace have been created");
// at this point we only expect 3 regions to be assigned out // at this point we only expect 3 regions to be assigned out
// (catalogs and namespace, + 1 online region) // (catalogs and namespace, + 1 online region)
assertEquals(3, cluster.countServedRegions()); assertEquals(3, cluster.countServedRegions());
HRegionInfo hriOnline = onlineTable.getRegionLocation( HRegionInfo hriOnline = null;
HConstants.EMPTY_START_ROW).getRegionInfo(); try (RegionLocator locator =
TEST_UTIL.getConnection().getRegionLocator(TableName.valueOf("onlineTable"))) {
hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
}
RegionStates regionStates = master.getAssignmentManager().getRegionStates(); RegionStates regionStates = master.getAssignmentManager().getRegionStates();
RegionStateStore stateStore = master.getAssignmentManager().getRegionStateStore(); RegionStateStore stateStore = master.getAssignmentManager().getRegionStateStore();
View File
@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@ -206,7 +206,7 @@ public class TestMasterNoCluster {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have an HConnection // the conf from the master; the conf will already have an HConnection
// associate so the below mocking of a connection will fail. // associate so the below mocking of a connection will fail.
@ -282,7 +282,7 @@ public class TestMasterNoCluster {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
// Insert a mock for the connection, use TESTUTIL.getConfiguration rather than // Insert a mock for the connection, use TESTUTIL.getConfiguration rather than
// the conf from the master; the conf will already have an HConnection // the conf from the master; the conf will already have an HConnection
// associate so the below mocking of a connection will fail. // associate so the below mocking of a connection will fail.
View File
@ -43,7 +43,9 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MetaTableAccessor.Visitor; import org.apache.hadoop.hbase.MetaTableAccessor.Visitor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
@ -62,7 +64,8 @@ import org.junit.experimental.categories.Category;
public class TestMasterOperationsForRegionReplicas { public class TestMasterOperationsForRegionReplicas {
final static Log LOG = LogFactory.getLog(TestRegionPlacement.class); final static Log LOG = LogFactory.getLog(TestRegionPlacement.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static HBaseAdmin admin; private static Connection CONNECTION = null;
private static Admin ADMIN;
private static int numSlaves = 2; private static int numSlaves = 2;
private static Configuration conf; private static Configuration conf;
@ -71,14 +74,17 @@ public class TestMasterOperationsForRegionReplicas {
conf = TEST_UTIL.getConfiguration(); conf = TEST_UTIL.getConfiguration();
conf.setBoolean("hbase.tests.use.shortcircuit.reads", false); conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
TEST_UTIL.startMiniCluster(numSlaves); TEST_UTIL.startMiniCluster(numSlaves);
admin = new HBaseAdmin(conf); CONNECTION = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
while(admin.getClusterStatus().getServers().size() < numSlaves) { ADMIN = CONNECTION.getAdmin();
while(ADMIN.getClusterStatus().getServers().size() < numSlaves) {
Thread.sleep(100); Thread.sleep(100);
} }
} }
@AfterClass @AfterClass
public static void tearDownAfterClass() throws Exception { public static void tearDownAfterClass() throws Exception {
if (ADMIN != null) ADMIN.close();
if (CONNECTION != null && !CONNECTION.isClosed()) CONNECTION.close();
TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.shutdownMiniCluster();
} }
@ -91,15 +97,15 @@ public class TestMasterOperationsForRegionReplicas {
HTableDescriptor desc = new HTableDescriptor(table); HTableDescriptor desc = new HTableDescriptor(table);
desc.setRegionReplication(numReplica); desc.setRegionReplication(numReplica);
desc.addFamily(new HColumnDescriptor("family")); desc.addFamily(new HColumnDescriptor("family"));
admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
validateNumberOfRowsInMeta(table, numRegions, admin.getConnection()); validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
List<HRegionInfo> hris = MetaTableAccessor.getTableRegions( List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(
admin.getConnection(), table); ADMIN.getConnection(), table);
assert(hris.size() == numRegions * numReplica); assert(hris.size() == numRegions * numReplica);
} finally { } finally {
admin.disableTable(table); ADMIN.disableTable(table);
admin.deleteTable(table); ADMIN.deleteTable(table);
} }
} }
@ -112,11 +118,11 @@ public class TestMasterOperationsForRegionReplicas {
HTableDescriptor desc = new HTableDescriptor(table); HTableDescriptor desc = new HTableDescriptor(table);
desc.setRegionReplication(numReplica); desc.setRegionReplication(numReplica);
desc.addFamily(new HColumnDescriptor("family")); desc.addFamily(new HColumnDescriptor("family"));
admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
TEST_UTIL.waitTableEnabled(table); TEST_UTIL.waitTableEnabled(table);
validateNumberOfRowsInMeta(table, numRegions, admin.getConnection()); validateNumberOfRowsInMeta(table, numRegions, ADMIN.getConnection());
List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
assert(hris.size() == numRegions * numReplica); assert(hris.size() == numRegions * numReplica);
// check that the master created expected number of RegionState objects // check that the master created expected number of RegionState objects
for (int i = 0; i < numRegions; i++) { for (int i = 0; i < numRegions; i++) {
@ -128,7 +134,7 @@ public class TestMasterOperationsForRegionReplicas {
} }
} }
List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(admin.getConnection()); List<Result> metaRows = MetaTableAccessor.fullScanOfMeta(ADMIN.getConnection());
int numRows = 0; int numRows = 0;
for (Result result : metaRows) { for (Result result : metaRows) {
RegionLocations locations = MetaTableAccessor.getRegionLocations(result); RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
@ -145,7 +151,7 @@ public class TestMasterOperationsForRegionReplicas {
// The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta // The same verification of the meta as above but with the SnapshotOfRegionAssignmentFromMeta
// class // class
validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
admin.getConnection()); ADMIN.getConnection());
// Now kill the master, restart it and see if the assignments are kept // Now kill the master, restart it and see if the assignments are kept
ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster(); ServerName master = TEST_UTIL.getHBaseClusterInterface().getClusterStatus().getMaster();
@ -162,7 +168,7 @@ public class TestMasterOperationsForRegionReplicas {
} }
} }
validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
admin.getConnection()); ADMIN.getConnection());
// Now shut the whole cluster down, and verify the assignments are kept so that the // Now shut the whole cluster down, and verify the assignments are kept so that the
// availability constraints are met. // availability constraints are met.
@ -170,46 +176,42 @@ public class TestMasterOperationsForRegionReplicas {
TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.shutdownMiniHBaseCluster();
TEST_UTIL.startMiniHBaseCluster(1, numSlaves); TEST_UTIL.startMiniHBaseCluster(1, numSlaves);
TEST_UTIL.waitTableEnabled(table); TEST_UTIL.waitTableEnabled(table);
admin.close();
admin = new HBaseAdmin(conf);
validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica, validateFromSnapshotFromMeta(TEST_UTIL, table, numRegions, numReplica,
admin.getConnection()); ADMIN.getConnection());
// Now shut the whole cluster down, and verify regions are assigned even if there is only // Now shut the whole cluster down, and verify regions are assigned even if there is only
// one server running // one server running
TEST_UTIL.shutdownMiniHBaseCluster(); TEST_UTIL.shutdownMiniHBaseCluster();
TEST_UTIL.startMiniHBaseCluster(1, 1); TEST_UTIL.startMiniHBaseCluster(1, 1);
TEST_UTIL.waitTableEnabled(table); TEST_UTIL.waitTableEnabled(table);
admin.close(); validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica);
admin = new HBaseAdmin(conf);
validateSingleRegionServerAssignment(admin.getConnection(), numRegions, numReplica);
for (int i = 1; i < numSlaves; i++) { //restore the cluster for (int i = 1; i < numSlaves; i++) { //restore the cluster
TEST_UTIL.getMiniHBaseCluster().startRegionServer(); TEST_UTIL.getMiniHBaseCluster().startRegionServer();
} }
//check on alter table //check on alter table
admin.disableTable(table); ADMIN.disableTable(table);
assert(admin.isTableDisabled(table)); assert(ADMIN.isTableDisabled(table));
//increase the replica //increase the replica
desc.setRegionReplication(numReplica + 1); desc.setRegionReplication(numReplica + 1);
admin.modifyTable(table, desc); ADMIN.modifyTable(table, desc);
admin.enableTable(table); ADMIN.enableTable(table);
assert(admin.isTableEnabled(table)); assert(ADMIN.isTableEnabled(table));
List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster() List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates().getRegionsOfTable(table); .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
assert(regions.size() == numRegions * (numReplica + 1)); assert(regions.size() == numRegions * (numReplica + 1));
//decrease the replica(earlier, table was modified to have a replica count of numReplica + 1) //decrease the replica(earlier, table was modified to have a replica count of numReplica + 1)
admin.disableTable(table); ADMIN.disableTable(table);
desc.setRegionReplication(numReplica); desc.setRegionReplication(numReplica);
admin.modifyTable(table, desc); ADMIN.modifyTable(table, desc);
admin.enableTable(table); ADMIN.enableTable(table);
assert(admin.isTableEnabled(table)); assert(ADMIN.isTableEnabled(table));
regions = TEST_UTIL.getMiniHBaseCluster().getMaster() regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates().getRegionsOfTable(table); .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
assert(regions.size() == numRegions * numReplica); assert(regions.size() == numRegions * numReplica);
//also make sure the meta table has the replica locations removed //also make sure the meta table has the replica locations removed
hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
assert(hris.size() == numRegions * numReplica); assert(hris.size() == numRegions * numReplica);
//just check that the number of default replica regions in the meta table are the same //just check that the number of default replica regions in the meta table are the same
//as the number of regions the table was created with, and the count of the //as the number of regions the table was created with, and the count of the
@ -225,8 +227,8 @@ public class TestMasterOperationsForRegionReplicas {
Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values()); Collection<Integer> counts = new HashSet<Integer>(defaultReplicas.values());
assert(counts.size() == 1 && counts.contains(new Integer(numReplica))); assert(counts.size() == 1 && counts.contains(new Integer(numReplica)));
} finally { } finally {
admin.disableTable(table); ADMIN.disableTable(table);
admin.deleteTable(table); ADMIN.deleteTable(table);
} }
} }
@ -241,17 +243,17 @@ public class TestMasterOperationsForRegionReplicas {
HTableDescriptor desc = new HTableDescriptor(table); HTableDescriptor desc = new HTableDescriptor(table);
desc.setRegionReplication(numReplica); desc.setRegionReplication(numReplica);
desc.addFamily(new HColumnDescriptor("family")); desc.addFamily(new HColumnDescriptor("family"));
admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions); ADMIN.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), numRegions);
TEST_UTIL.waitTableEnabled(table); TEST_UTIL.waitTableEnabled(table);
Set<byte[]> tableRows = new HashSet<byte[]>(); Set<byte[]> tableRows = new HashSet<byte[]>();
List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(admin.getConnection(), table); List<HRegionInfo> hris = MetaTableAccessor.getTableRegions(ADMIN.getConnection(), table);
for (HRegionInfo hri : hris) { for (HRegionInfo hri : hris) {
tableRows.add(hri.getRegionName()); tableRows.add(hri.getRegionName());
} }
admin.disableTable(table); ADMIN.disableTable(table);
// now delete one replica info from all the rows // now delete one replica info from all the rows
// this is to make the meta appear to be only partially updated // this is to make the meta appear to be only partially updated
Table metaTable = new HTable(TableName.META_TABLE_NAME, admin.getConnection()); Table metaTable = new HTable(TableName.META_TABLE_NAME, ADMIN.getConnection());
for (byte[] row : tableRows) { for (byte[] row : tableRows) {
Delete deleteOneReplicaLocation = new Delete(row); Delete deleteOneReplicaLocation = new Delete(row);
deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY, deleteOneReplicaLocation.deleteColumns(HConstants.CATALOG_FAMILY,
@ -265,14 +267,14 @@ public class TestMasterOperationsForRegionReplicas {
metaTable.close(); metaTable.close();
// even if the meta table is partly updated, when we re-enable the table, we should // even if the meta table is partly updated, when we re-enable the table, we should
// get back the desired number of replicas for the regions // get back the desired number of replicas for the regions
admin.enableTable(table); ADMIN.enableTable(table);
assert(admin.isTableEnabled(table)); assert(ADMIN.isTableEnabled(table));
List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster() List<HRegionInfo> regions = TEST_UTIL.getMiniHBaseCluster().getMaster()
.getAssignmentManager().getRegionStates().getRegionsOfTable(table); .getAssignmentManager().getRegionStates().getRegionsOfTable(table);
assert(regions.size() == numRegions * numReplica); assert(regions.size() == numRegions * numReplica);
} finally { } finally {
admin.disableTable(table); ADMIN.disableTable(table);
admin.deleteTable(table); ADMIN.deleteTable(table);
} }
} }
@ -286,7 +288,7 @@ public class TestMasterOperationsForRegionReplicas {
private void validateNumberOfRowsInMeta(final TableName table, int numRegions, private void validateNumberOfRowsInMeta(final TableName table, int numRegions,
Connection connection) throws IOException { Connection connection) throws IOException {
assert(admin.tableExists(table)); assert(ADMIN.tableExists(table));
final AtomicInteger count = new AtomicInteger(); final AtomicInteger count = new AtomicInteger();
Visitor visitor = new Visitor() { Visitor visitor = new Visitor() {
@Override @Override

View File

@ -25,12 +25,17 @@ import static org.junit.Assert.assertTrue;
import java.util.List; import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.LocalHBaseCluster; import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@ -39,6 +44,8 @@ import org.junit.experimental.categories.Category;
@Category({MasterTests.class, LargeTests.class}) @Category({MasterTests.class, LargeTests.class})
public class TestMasterShutdown { public class TestMasterShutdown {
public static final Log LOG = LogFactory.getLog(TestMasterShutdown.class);
/** /**
* Simple test of shutdown. * Simple test of shutdown.
* <p> * <p>
@ -46,9 +53,8 @@ public class TestMasterShutdown {
* Verifies that all masters are properly shutdown. * Verifies that all masters are properly shutdown.
* @throws Exception * @throws Exception
*/ */
@Test (timeout=240000) @Test (timeout=120000)
public void testMasterShutdown() throws Exception { public void testMasterShutdown() throws Exception {
final int NUM_MASTERS = 3; final int NUM_MASTERS = 3;
final int NUM_RS = 3; final int NUM_RS = 3;
@ -56,9 +62,9 @@ public class TestMasterShutdown {
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
// Start the cluster // Start the cluster
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf); HBaseTestingUtility htu = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); htu.startMiniCluster(NUM_MASTERS, NUM_RS);
MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster(); MiniHBaseCluster cluster = htu.getHBaseCluster();
// get all the master threads // get all the master threads
List<MasterThread> masterThreads = cluster.getMasterThreads(); List<MasterThread> masterThreads = cluster.getMasterThreads();
@ -91,12 +97,11 @@ public class TestMasterShutdown {
// make sure all the masters properly shutdown // make sure all the masters properly shutdown
assertEquals(0, masterThreads.size()); assertEquals(0, masterThreads.size());
TEST_UTIL.shutdownMiniCluster(); htu.shutdownMiniCluster();
} }
@Test(timeout = 180000) @Test(timeout = 60000)
public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception { public void testMasterShutdownBeforeStartingAnyRegionServer() throws Exception {
final int NUM_MASTERS = 1; final int NUM_MASTERS = 1;
final int NUM_RS = 0; final int NUM_RS = 0;
@ -106,25 +111,35 @@ public class TestMasterShutdown {
conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1); conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
// Start the cluster // Start the cluster
final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf); final HBaseTestingUtility util = new HBaseTestingUtility(conf);
TEST_UTIL.startMiniDFSCluster(3); util.startMiniDFSCluster(3);
TEST_UTIL.startMiniZKCluster(); util.startMiniZKCluster();
TEST_UTIL.createRootDir(); util.createRootDir();
final LocalHBaseCluster cluster = final LocalHBaseCluster cluster =
new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class, new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS, HMaster.class,
MiniHBaseCluster.MiniHBaseClusterRegionServer.class); MiniHBaseCluster.MiniHBaseClusterRegionServer.class);
final MasterThread master = cluster.getMasters().get(0); final int MASTER_INDEX = 0;
final MasterThread master = cluster.getMasters().get(MASTER_INDEX);
master.start(); master.start();
LOG.info("Called master start on " + master.getName());
Thread shutdownThread = new Thread() { Thread shutdownThread = new Thread() {
public void run() { public void run() {
LOG.info("Before call to shutdown master");
try { try {
TEST_UTIL.getHBaseAdmin().shutdown(); try (Connection connection =
cluster.waitOnMaster(0); ConnectionFactory.createConnection(util.getConfiguration())) {
try (Admin admin = connection.getAdmin()) {
admin.shutdown();
}
}
LOG.info("After call to shutdown master");
cluster.waitOnMaster(MASTER_INDEX);
} catch (Exception e) { } catch (Exception e) {
} }
}; };
}; };
shutdownThread.start(); shutdownThread.start();
LOG.info("Called master join on " + master.getName());
master.join(); master.join();
shutdownThread.join(); shutdownThread.join();
@ -132,10 +147,8 @@ public class TestMasterShutdown {
// make sure all the masters properly shutdown // make sure all the masters properly shutdown
assertEquals(0, masterThreads.size()); assertEquals(0, masterThreads.size());
TEST_UTIL.shutdownMiniZKCluster(); util.shutdownMiniZKCluster();
TEST_UTIL.shutdownMiniDFSCluster(); util.shutdownMiniDFSCluster();
TEST_UTIL.cleanupTestDir(); util.cleanupTestDir();
} }
} }
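The new shutdown path above replaces TEST_UTIL.getHBaseAdmin() with an Admin obtained from a Connection, both closed by the java7 try-with-resources idiom. A minimal sketch of the same pattern outside the test harness; the class name and the assumption that conf points at a running cluster are illustrative only:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShutdownViaConnectionSketch {
  // Ask the active master to shut the cluster down; Connection and Admin are
  // both Closeable, so try-with-resources replaces the explicit finally/close.
  public static void shutdownCluster(Configuration conf) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.shutdown();
    }
  }

  public static void main(String[] args) throws IOException {
    shutdownCluster(HBaseConfiguration.create());
  }
}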

View File

@ -131,7 +131,7 @@ public class TestRestartCluster {
// We don't have to use SnapshotOfRegionAssignmentFromMeta. // We don't have to use SnapshotOfRegionAssignmentFromMeta.
// We use it here because AM used to use it to load all user region placements // We use it here because AM used to use it to load all user region placements
SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta( SnapshotOfRegionAssignmentFromMeta snapshot = new SnapshotOfRegionAssignmentFromMeta(
master.getShortCircuitConnection()); master.getConnection());
snapshot.initialize(); snapshot.initialize();
Map<HRegionInfo, ServerName> regionToRegionServerMap Map<HRegionInfo, ServerName> regionToRegionServerMap
= snapshot.getRegionToRegionServerMap(); = snapshot.getRegionToRegionServerMap();
@ -197,7 +197,7 @@ public class TestRestartCluster {
Threads.sleep(100); Threads.sleep(100);
} }
snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getShortCircuitConnection()); snapshot =new SnapshotOfRegionAssignmentFromMeta(master.getConnection());
snapshot.initialize(); snapshot.initialize();
Map<HRegionInfo, ServerName> newRegionToRegionServerMap = Map<HRegionInfo, ServerName> newRegionToRegionServerMap =
snapshot.getRegionToRegionServerMap(); snapshot.getRegionToRegionServerMap();

View File

@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
import org.apache.hadoop.hbase.master.SplitLogManager.Task; import org.apache.hadoop.hbase.master.SplitLogManager.Task;
import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch;
@ -154,7 +154,7 @@ public class TestSplitLogManager {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -162,7 +162,6 @@ public class TestSplitLogManager {
public MetaTableLocator getMetaTableLocator() { public MetaTableLocator getMetaTableLocator() {
return null; return null;
} }
} }
static Stoppable stopper = new Stoppable() { static Stoppable stopper = new Stoppable() {

View File

@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdge;
@ -217,7 +217,7 @@ public class TestHFileCleaner {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }

View File

@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -153,7 +154,7 @@ public class TestHFileLinkCleaner {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }

View File

@ -26,9 +26,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.replication.ReplicationFactory; import org.apache.hadoop.hbase.replication.ReplicationFactory;
import org.apache.hadoop.hbase.replication.ReplicationQueues; import org.apache.hadoop.hbase.replication.ReplicationQueues;
import org.apache.hadoop.hbase.replication.regionserver.Replication; import org.apache.hadoop.hbase.replication.regionserver.Replication;
@ -160,7 +164,7 @@ public class TestLogsCleaner {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -190,6 +194,4 @@ public class TestLogsCleaner {
return false; return false;
} }
} }
} }

View File

@ -18,6 +18,9 @@
package org.apache.hadoop.hbase.quotas; package org.apache.hadoop.hbase.quotas;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -25,20 +28,20 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle; import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
/** /**
* Test the quota table helpers (e.g. CRUD operations) * Test the quota table helpers (e.g. CRUD operations)
*/ */
@ -47,6 +50,7 @@ public class TestQuotaTableUtil {
final Log LOG = LogFactory.getLog(getClass()); final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Connection connection;
@BeforeClass @BeforeClass
public static void setUpBeforeClass() throws Exception { public static void setUpBeforeClass() throws Exception {
@ -66,6 +70,16 @@ public class TestQuotaTableUtil {
TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.shutdownMiniCluster();
} }
@Before
public void before() throws IOException {
this.connection = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration());
}
@After
public void after() throws IOException {
this.connection.close();
}
@Test @Test
public void testTableQuotaUtil() throws Exception { public void testTableQuotaUtil() throws Exception {
final TableName table = TableName.valueOf("testTableQuotaUtilTable"); final TableName table = TableName.valueOf("testTableQuotaUtilTable");
@ -79,13 +93,13 @@ public class TestQuotaTableUtil {
.build(); .build();
// Add user quota and verify it // Add user quota and verify it
QuotaUtil.addTableQuota(TEST_UTIL.getConfiguration(), table, quota); QuotaUtil.addTableQuota(this.connection, table, quota);
Quotas resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); Quotas resQuota = QuotaUtil.getTableQuota(this.connection, table);
assertEquals(quota, resQuota); assertEquals(quota, resQuota);
// Remove user quota and verify it // Remove user quota and verify it
QuotaUtil.deleteTableQuota(TEST_UTIL.getConfiguration(), table); QuotaUtil.deleteTableQuota(this.connection, table);
resQuota = QuotaUtil.getTableQuota(TEST_UTIL.getConfiguration(), table); resQuota = QuotaUtil.getTableQuota(this.connection, table);
assertEquals(null, resQuota); assertEquals(null, resQuota);
} }
@ -102,13 +116,13 @@ public class TestQuotaTableUtil {
.build(); .build();
// Add user quota and verify it // Add user quota and verify it
QuotaUtil.addNamespaceQuota(TEST_UTIL.getConfiguration(), namespace, quota); QuotaUtil.addNamespaceQuota(this.connection, namespace, quota);
Quotas resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); Quotas resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
assertEquals(quota, resQuota); assertEquals(quota, resQuota);
// Remove user quota and verify it // Remove user quota and verify it
QuotaUtil.deleteNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); QuotaUtil.deleteNamespaceQuota(this.connection, namespace);
resQuota = QuotaUtil.getNamespaceQuota(TEST_UTIL.getConfiguration(), namespace); resQuota = QuotaUtil.getNamespaceQuota(this.connection, namespace);
assertEquals(null, resQuota); assertEquals(null, resQuota);
} }
@ -139,33 +153,33 @@ public class TestQuotaTableUtil {
.build(); .build();
// Add user global quota // Add user global quota
QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, quota); QuotaUtil.addUserQuota(this.connection, user, quota);
Quotas resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); Quotas resQuota = QuotaUtil.getUserQuota(this.connection, user);
assertEquals(quota, resQuota); assertEquals(quota, resQuota);
// Add user quota for table // Add user quota for table
QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, table, quotaTable); QuotaUtil.addUserQuota(this.connection, user, table, quotaTable);
Quotas resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); Quotas resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
assertEquals(quotaTable, resQuotaTable); assertEquals(quotaTable, resQuotaTable);
// Add user quota for namespace // Add user quota for namespace
QuotaUtil.addUserQuota(TEST_UTIL.getConfiguration(), user, namespace, quotaNamespace); QuotaUtil.addUserQuota(this.connection, user, namespace, quotaNamespace);
Quotas resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); Quotas resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
assertEquals(quotaNamespace, resQuotaNS); assertEquals(quotaNamespace, resQuotaNS);
// Delete user global quota // Delete user global quota
QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user); QuotaUtil.deleteUserQuota(this.connection, user);
resQuota = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user); resQuota = QuotaUtil.getUserQuota(this.connection, user);
assertEquals(null, resQuota); assertEquals(null, resQuota);
// Delete user quota for table // Delete user quota for table
QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, table); QuotaUtil.deleteUserQuota(this.connection, user, table);
resQuotaTable = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, table); resQuotaTable = QuotaUtil.getUserQuota(this.connection, user, table);
assertEquals(null, resQuotaTable); assertEquals(null, resQuotaTable);
// Delete user quota for namespace // Delete user quota for namespace
QuotaUtil.deleteUserQuota(TEST_UTIL.getConfiguration(), user, namespace); QuotaUtil.deleteUserQuota(this.connection, user, namespace);
resQuotaNS = QuotaUtil.getUserQuota(TEST_UTIL.getConfiguration(), user, namespace); resQuotaNS = QuotaUtil.getUserQuota(this.connection, user, namespace);
assertEquals(null, resQuotaNS); assertEquals(null, resQuotaNS);
} }
} }
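The quota test above now hands a shared Connection to QuotaUtil instead of a Configuration, creating it once in @Before and closing it in @After. A rough sketch of that calling pattern, assuming it runs from the org.apache.hadoop.hbase.quotas package (QuotaUtil is internal) and using an empty Quotas message as a stand-in for real throttle settings:

package org.apache.hadoop.hbase.quotas;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;

public class QuotaUtilConnectionSketch {
  public static void roundTripTableQuota(Configuration conf) throws IOException {
    TableName table = TableName.valueOf("exampleQuotaTable"); // illustrative name
    // One Connection is opened up front and reused for every QuotaUtil call,
    // instead of each call building its own connection from a Configuration.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      Quotas quota = Quotas.newBuilder().build(); // placeholder; real tests set throttle fields
      QuotaUtil.addTableQuota(connection, table, quota);
      Quotas readBack = QuotaUtil.getTableQuota(connection, table);
      assert readBack != null;
      QuotaUtil.deleteTableQuota(connection, table);
    }
  }
}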

View File

@ -57,6 +57,7 @@ public class TestHRegionOnCluster {
public void testDataCorrectnessReplayingRecoveredEdits() throws Exception { public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
final int NUM_MASTERS = 1; final int NUM_MASTERS = 1;
final int NUM_RS = 3; final int NUM_RS = 3;
Admin hbaseAdmin = null;
TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS); TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
try { try {
@ -68,7 +69,7 @@ public class TestHRegionOnCluster {
// Create table // Create table
HTableDescriptor desc = new HTableDescriptor(TABLENAME); HTableDescriptor desc = new HTableDescriptor(TABLENAME);
desc.addFamily(new HColumnDescriptor(FAMILY)); desc.addFamily(new HColumnDescriptor(FAMILY));
Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin(); hbaseAdmin = master.getConnection().getAdmin();
hbaseAdmin.createTable(desc); hbaseAdmin.createTable(desc);
assertTrue(hbaseAdmin.isTableAvailable(TABLENAME)); assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
@ -130,6 +131,7 @@ public class TestHRegionOnCluster {
putDataAndVerify(table, "r4", FAMILY, "v4", 4); putDataAndVerify(table, "r4", FAMILY, "v4", 4);
} finally { } finally {
if (hbaseAdmin != null) hbaseAdmin.close();
TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.shutdownMiniCluster();
} }
} }

View File

@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.Cacheable;
@ -477,7 +477,7 @@ public class TestHeapMemoryManager {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }

View File

@ -209,12 +209,12 @@ public class TestRegionMergeTransactionOnCluster {
table.close(); table.close();
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(master.getShortCircuitConnection(), tableName); .getTableRegionsAndLocations(master.getConnection(), tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst(); HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
HTableDescriptor tableDescritor = master.getTableDescriptors().get( HTableDescriptor tableDescritor = master.getTableDescriptors().get(
tableName); tableName);
Result mergedRegionResult = MetaTableAccessor.getRegionResult( Result mergedRegionResult = MetaTableAccessor.getRegionResult(
master.getShortCircuitConnection(), mergedRegionInfo.getRegionName()); master.getConnection(), mergedRegionInfo.getRegionName());
// contains merge reference in META // contains merge reference in META
assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, assertTrue(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@ -257,7 +257,7 @@ public class TestRegionMergeTransactionOnCluster {
assertFalse(fs.exists(regionBdir)); assertFalse(fs.exists(regionBdir));
mergedRegionResult = MetaTableAccessor.getRegionResult( mergedRegionResult = MetaTableAccessor.getRegionResult(
master.getShortCircuitConnection(), mergedRegionInfo.getRegionName()); master.getConnection(), mergedRegionInfo.getRegionName());
assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
HConstants.MERGEA_QUALIFIER) != null); HConstants.MERGEA_QUALIFIER) != null);
assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY, assertFalse(mergedRegionResult.getValue(HConstants.CATALOG_FAMILY,
@ -327,13 +327,13 @@ public class TestRegionMergeTransactionOnCluster {
createTableAndLoadData(master, tableName, 5, 2); createTableAndLoadData(master, tableName, 5, 2);
List<Pair<HRegionInfo, ServerName>> initialRegionToServers = List<Pair<HRegionInfo, ServerName>> initialRegionToServers =
MetaTableAccessor.getTableRegionsAndLocations( MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tableName); master.getConnection(), tableName);
// Merge 1st and 2nd region // Merge 1st and 2nd region
PairOfSameType<HRegionInfo> mergedRegions = mergeRegionsAndVerifyRegionNum(master, tableName, PairOfSameType<HRegionInfo> mergedRegions = mergeRegionsAndVerifyRegionNum(master, tableName,
0, 2, 5 * 2 - 2); 0, 2, 5 * 2 - 2);
List<Pair<HRegionInfo, ServerName>> currentRegionToServers = List<Pair<HRegionInfo, ServerName>> currentRegionToServers =
MetaTableAccessor.getTableRegionsAndLocations( MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tableName); master.getConnection(), tableName);
List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>(); List<HRegionInfo> initialRegions = new ArrayList<HRegionInfo>();
for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) { for (Pair<HRegionInfo, ServerName> p : initialRegionToServers) {
initialRegions.add(p.getFirst()); initialRegions.add(p.getFirst());
@ -373,7 +373,7 @@ public class TestRegionMergeTransactionOnCluster {
int regionAnum, int regionBnum) throws Exception { int regionAnum, int regionBnum) throws Exception {
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations( .getTableRegionsAndLocations(
master.getShortCircuitConnection(), tablename); master.getConnection(), tablename);
HRegionInfo regionA = tableRegions.get(regionAnum).getFirst(); HRegionInfo regionA = tableRegions.get(regionAnum).getFirst();
HRegionInfo regionB = tableRegions.get(regionBnum).getFirst(); HRegionInfo regionB = tableRegions.get(regionBnum).getFirst();
TEST_UTIL.getHBaseAdmin().mergeRegions( TEST_UTIL.getHBaseAdmin().mergeRegions(
@ -389,7 +389,7 @@ public class TestRegionMergeTransactionOnCluster {
long timeout = System.currentTimeMillis() + waitTime; long timeout = System.currentTimeMillis() + waitTime;
while (System.currentTimeMillis() < timeout) { while (System.currentTimeMillis() < timeout) {
tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations( tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tablename); master.getConnection(), tablename);
tableRegionsInMaster = master.getAssignmentManager().getRegionStates() tableRegionsInMaster = master.getAssignmentManager().getRegionStates()
.getRegionsOfTable(tablename); .getRegionsOfTable(tablename);
if (tableRegionsInMeta.size() == expectedRegionNum if (tableRegionsInMeta.size() == expectedRegionNum
@ -400,7 +400,7 @@ public class TestRegionMergeTransactionOnCluster {
} }
tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations( tableRegionsInMeta = MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tablename); master.getConnection(), tablename);
LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta)); LOG.info("Regions after merge:" + Joiner.on(',').join(tableRegionsInMeta));
assertEquals(expectedRegionNum, tableRegionsInMeta.size()); assertEquals(expectedRegionNum, tableRegionsInMeta.size());
} }
@ -430,14 +430,14 @@ public class TestRegionMergeTransactionOnCluster {
List<Pair<HRegionInfo, ServerName>> tableRegions; List<Pair<HRegionInfo, ServerName>> tableRegions;
while (System.currentTimeMillis() < timeout) { while (System.currentTimeMillis() < timeout) {
tableRegions = MetaTableAccessor.getTableRegionsAndLocations( tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tablename); master.getConnection(), tablename);
if (tableRegions.size() == numRegions * replication) if (tableRegions.size() == numRegions * replication)
break; break;
Thread.sleep(250); Thread.sleep(250);
} }
tableRegions = MetaTableAccessor.getTableRegionsAndLocations( tableRegions = MetaTableAccessor.getTableRegionsAndLocations(
master.getShortCircuitConnection(), tablename); master.getConnection(), tablename);
LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions)); LOG.info("Regions after load: " + Joiner.on(',').join(tableRegions));
assertEquals(numRegions * replication, tableRegions.size()); assertEquals(numRegions * replication, tableRegions.size());
return table; return table;

View File

@ -18,10 +18,11 @@
*/ */
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertThat; import static org.junit.Assert.assertThat;
import static org.hamcrest.CoreMatchers.*; import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
@ -36,17 +37,17 @@ import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters; import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask; import org.apache.hadoop.hbase.SplitLogTask;
import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType; import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
@ -128,7 +129,7 @@ public class TestSplitLogWorker {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -136,7 +137,6 @@ public class TestSplitLogWorker {
public MetaTableLocator getMetaTableLocator() { public MetaTableLocator getMetaTableLocator() {
return null; return null;
} }
} }
private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems) private void waitForCounter(AtomicLong ctr, long oldval, long newval, long timems)

View File

@ -634,7 +634,7 @@ public class TestSplitTransactionOnCluster {
admin.setBalancerRunning(false, true); admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it don't remove parent on us. // Turn off the meta scanner so it don't remove parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false); cluster.getMaster().setCatalogJanitorEnabled(false);
boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(), boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
tableName); tableName);
assertEquals("The specified table should present.", true, tableExists); assertEquals("The specified table should present.", true, tableExists);
final HRegion region = findSplittableRegion(regions); final HRegion region = findSplittableRegion(regions);
@ -646,7 +646,7 @@ public class TestSplitTransactionOnCluster {
} catch (IOException e) { } catch (IOException e) {
} }
tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(), tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
tableName); tableName);
assertEquals("The specified table should present.", true, tableExists); assertEquals("The specified table should present.", true, tableExists);
} finally { } finally {
@ -680,7 +680,7 @@ public class TestSplitTransactionOnCluster {
admin.setBalancerRunning(false, true); admin.setBalancerRunning(false, true);
// Turn off the meta scanner so it don't remove parent on us. // Turn off the meta scanner so it don't remove parent on us.
cluster.getMaster().setCatalogJanitorEnabled(false); cluster.getMaster().setCatalogJanitorEnabled(false);
boolean tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(), boolean tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
tableName); tableName);
assertEquals("The specified table should be present.", true, tableExists); assertEquals("The specified table should be present.", true, tableExists);
final HRegion region = findSplittableRegion(oldRegions); final HRegion region = findSplittableRegion(oldRegions);
@ -703,7 +703,7 @@ public class TestSplitTransactionOnCluster {
Thread.sleep(1000); Thread.sleep(1000);
} while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1))) } while ((newRegions.contains(oldRegions.get(0)) || newRegions.contains(oldRegions.get(1)))
|| newRegions.size() != 4); || newRegions.size() != 4);
tableExists = MetaTableAccessor.tableExists(regionServer.getShortCircuitConnection(), tableExists = MetaTableAccessor.tableExists(regionServer.getConnection(),
tableName); tableName);
assertEquals("The specified table should be present.", true, tableExists); assertEquals("The specified table should be present.", true, tableExists);
// exists works on stale and we see the put after the flush // exists works on stale and we see the put after the flush
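The checks above go through regionServer.getConnection() -- the renamed getShortCircuitConnection -- but MetaTableAccessor takes an ordinary Connection, so the same lookup can be made from a client-side connection. A small sketch under that assumption; the class and method names are made up:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaTableExistsSketch {
  // Returns true if hbase:meta has entries for the named table.
  public static boolean tableExists(Configuration conf, String name) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      return MetaTableAccessor.tableExists(connection, TableName.valueOf(name));
    }
  }
}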

View File

@ -32,11 +32,17 @@ import java.util.Map;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
@ -267,33 +273,40 @@ public class TestPerTableCFReplication {
@Test(timeout=300000) @Test(timeout=300000)
public void testPerTableCFReplication() throws Exception { public void testPerTableCFReplication() throws Exception {
LOG.info("testPerTableCFReplication"); LOG.info("testPerTableCFReplication");
ReplicationAdmin admin1 = new ReplicationAdmin(conf1); ReplicationAdmin replicationAdmin = new ReplicationAdmin(conf1);
Connection connection1 = ConnectionFactory.createConnection(conf1);
Connection connection2 = ConnectionFactory.createConnection(conf2);
Connection connection3 = ConnectionFactory.createConnection(conf3);
try {
Admin admin1 = connection1.getAdmin();
Admin admin2 = connection2.getAdmin();
Admin admin3 = connection3.getAdmin();
new HBaseAdmin(conf1).createTable(tabA); admin1.createTable(tabA);
new HBaseAdmin(conf1).createTable(tabB); admin1.createTable(tabB);
new HBaseAdmin(conf1).createTable(tabC); admin1.createTable(tabC);
new HBaseAdmin(conf2).createTable(tabA); admin2.createTable(tabA);
new HBaseAdmin(conf2).createTable(tabB); admin2.createTable(tabB);
new HBaseAdmin(conf2).createTable(tabC); admin2.createTable(tabC);
new HBaseAdmin(conf3).createTable(tabA); admin3.createTable(tabA);
new HBaseAdmin(conf3).createTable(tabB); admin3.createTable(tabB);
new HBaseAdmin(conf3).createTable(tabC); admin3.createTable(tabC);
Table htab1A = new HTable(conf1, tabAName); Table htab1A = connection1.getTable(tabAName);
Table htab2A = new HTable(conf2, tabAName); Table htab2A = connection2.getTable(tabAName);
Table htab3A = new HTable(conf3, tabAName); Table htab3A = connection3.getTable(tabAName);
Table htab1B = new HTable(conf1, tabBName); Table htab1B = connection1.getTable(tabBName);
Table htab2B = new HTable(conf2, tabBName); Table htab2B = connection2.getTable(tabBName);
Table htab3B = new HTable(conf3, tabBName); Table htab3B = connection3.getTable(tabBName);
Table htab1C = new HTable(conf1, tabCName); Table htab1C = connection1.getTable(tabCName);
Table htab2C = new HTable(conf2, tabCName); Table htab2C = connection2.getTable(tabCName);
Table htab3C = new HTable(conf3, tabCName); Table htab3C = connection3.getTable(tabCName);
// A. add cluster2/cluster3 as peers to cluster1 // A. add cluster2/cluster3 as peers to cluster1
admin1.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3"); replicationAdmin.addPeer("2", utility2.getClusterKey(), "TC;TB:f1,f3");
admin1.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2"); replicationAdmin.addPeer("3", utility3.getClusterKey(), "TA;TB:f1,f2");
// A1. tableA can only replicated to cluster3 // A1. tableA can only replicated to cluster3
putAndWaitWithFamily(row1, f1Name, htab1A, htab3A); putAndWaitWithFamily(row1, f1Name, htab1A, htab3A);
@ -336,8 +349,8 @@ public class TestPerTableCFReplication {
deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C); deleteAndWaitWithFamily(row1, f3Name, htab1C, htab2C);
// B. change peers' replicable table-cf config // B. change peers' replicable table-cf config
admin1.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3"); replicationAdmin.setPeerTableCFs("2", "TA:f1,f2; TC:f2,f3");
admin1.setPeerTableCFs("3", "TB; TC:f3"); replicationAdmin.setPeerTableCFs("3", "TB; TC:f3");
// B1. cf 'f1' of tableA can only replicated to cluster2 // B1. cf 'f1' of tableA can only replicated to cluster2
putAndWaitWithFamily(row2, f1Name, htab1A, htab2A); putAndWaitWithFamily(row2, f1Name, htab1A, htab2A);
@ -376,6 +389,11 @@ public class TestPerTableCFReplication {
// cf 'f3' of tableC can replicated to cluster2 and cluster3 // cf 'f3' of tableC can replicated to cluster2 and cluster3
putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C); putAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C); deleteAndWaitWithFamily(row2, f3Name, htab1C, htab2C, htab3C);
} finally {
connection1.close();
connection2.close();
connection3.close();
}
} }
private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException { private void ensureRowNotReplicated(byte[] row, byte[] fam, Table... tables) throws IOException {
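The replication test above swaps every new HTable(conf, tableName) for Connection#getTable on a shared Connection. A minimal sketch of that pattern with an illustrative table name, family and row:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetTableSketch {
  public static void putOneRow(Configuration conf) throws IOException {
    // The Connection holds the heavyweight shared state; Table handles obtained
    // from it are cheap and are closed per use by try-with-resources.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("exampleTable"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      table.put(put);
    }
  }
}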

View File

@ -17,6 +17,9 @@
*/ */
package org.apache.hadoop.hbase.replication; package org.apache.hadoop.hbase.replication;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException; import java.io.IOException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -28,7 +31,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests; import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@ -38,12 +41,9 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException;
import org.junit.After; import org.junit.After;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Test;
import static org.junit.Assert.*;
import org.junit.Before; import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
@Category({ReplicationTests.class, MediumTests.class}) @Category({ReplicationTests.class, MediumTests.class})
@ -149,7 +149,7 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
} }
@Override @Override
public HConnection getShortCircuitConnection() { public ClusterConnection getConnection() {
return null; return null;
} }
@ -185,4 +185,3 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
} }
} }
} }

Some files were not shown because too many files have changed in this diff