diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index ded27e3ee5f..5abf6a43d68 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -17,22 +17,14 @@
*/
package org.apache.hadoop.hbase;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
@@ -56,8 +48,17 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.ServiceException;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
/**
* Read/write operations on region and assignment information store in
@@ -168,37 +169,31 @@ public class MetaTableAccessor {
}
/**
- * Callers should call close on the returned {@link HTable} instance.
- * @param connection connection we're using to access table
- * @param tableName Table to get an {@link org.apache.hadoop.hbase.client.HTable} against.
- * @return An {@link org.apache.hadoop.hbase.client.HTable} for tableName
+ * Callers should call close on the returned {@link Table} instance.
+ * @param connection connection we're using to access Meta
+ * @return A {@link Table} for hbase:meta
* @throws IOException
- * @SuppressWarnings("deprecation")
*/
- private static Table getHTable(final Connection connection, final TableName tableName)
+ static Table getMetaHTable(final Connection connection)
throws IOException {
// We used to pass whole CatalogTracker in here, now we just pass in Connection
if (connection == null || connection.isClosed()) {
throw new NullPointerException("No connection");
}
// If the passed in 'connection' is 'managed' -- i.e. every second test uses
- // an HTable or an HBaseAdmin with managed connections -- then doing
+ // a Table or an HBaseAdmin with managed connections -- then doing
// connection.getTable will throw an exception saying you are NOT to use
// managed connections getting tables. Leaving this as it is for now. Will
// revisit when inclined to change all tests. User code probaby makes use of
// managed connections too so don't change it till post hbase 1.0.
- return new HTable(tableName, connection);
- }
-
- /**
- * Callers should call close on the returned {@link HTable} instance.
- * @param connection connection we're using to access Meta
- * @return An {@link HTable} for hbase:meta
- * @throws IOException
- */
- static Table getMetaHTable(final Connection connection)
- throws IOException {
- return getHTable(connection, TableName.META_TABLE_NAME);
+ //
+ // There should still be a way to use this method with an unmanaged connection.
+ if (connection instanceof ClusterConnection) {
+ if (((ClusterConnection) connection).isManaged()) {
+ return new HTable(TableName.META_TABLE_NAME, (ClusterConnection) connection);
+ }
+ }
+ return connection.getTable(TableName.META_TABLE_NAME);
}
/**
@@ -800,7 +795,7 @@ public class MetaTableAccessor {
* @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
* parent
*/
- public static PairOfSameType getDaughterRegions(Result data) throws IOException {
+ public static PairOfSameType getDaughterRegions(Result data) {
HRegionInfo splitA = getHRegionInfo(data, HConstants.SPLITA_QUALIFIER);
HRegionInfo splitB = getHRegionInfo(data, HConstants.SPLITB_QUALIFIER);
@@ -814,7 +809,7 @@ public class MetaTableAccessor {
* @return a pair of HRegionInfo or PairOfSameType(null, null) if the region is not a split
* parent
*/
- public static PairOfSameType getMergeRegions(Result data) throws IOException {
+ public static PairOfSameType getMergeRegions(Result data) {
HRegionInfo mergeA = getHRegionInfo(data, HConstants.MERGEA_QUALIFIER);
HRegionInfo mergeB = getHRegionInfo(data, HConstants.MERGEB_QUALIFIER);
@@ -1081,8 +1076,8 @@ public class MetaTableAccessor {
/**
* Adds a hbase:meta row for the specified new region to the given catalog table. The
- * HTable is not flushed or closed.
- * @param meta the HTable for META
+ * Table is not flushed or closed.
+ * @param meta the Table for META
* @param regionInfo region information
* @throws IOException if problem connecting or updating meta
*/
@@ -1097,7 +1092,7 @@ public class MetaTableAccessor {
* {@link #splitRegion(org.apache.hadoop.hbase.client.Connection,
* HRegionInfo, HRegionInfo, HRegionInfo, ServerName)}
* if you want to do that.
- * @param meta the HTable for META
+ * @param meta the Table for META
* @param regionInfo region information
* @param splitA first split daughter of the parent regionInfo
* @param splitB second split daughter of the parent regionInfo
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index ba07ce45824..8989725729e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -282,5 +282,11 @@ public interface ClusterConnection extends HConnection {
* @return RpcRetryingCallerFactory
*/
RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf);
+
+ /**
+ * Returns whether this connection is managed.
+ * @return true if this is a managed connection.
+ */
+ boolean isManaged();
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index a30ce9d98ab..001132835be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -442,4 +442,9 @@ abstract class ConnectionAdapter implements ClusterConnection {
public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
return wrappedConnection.getNewRpcRetryingCallerFactory(conf);
}
+
+ @Override
+ public boolean isManaged() {
+ return wrappedConnection.isManaged();
+ }
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index ce8651c24f7..f813ebd2f81 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -2508,6 +2508,11 @@ class ConnectionManager {
public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
return RpcRetryingCallerFactory.instantiate(conf, this.interceptor);
}
+
+ @Override
+ public boolean isManaged() {
+ return managed;
+ }
}
/**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 998f1e2c313..08dd0eca728 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1782,7 +1782,7 @@ public class HBaseFsck extends Configured {
for (OnlineEntry rse : hi.deployedEntries) {
LOG.debug("Undeploy region " + rse.hri + " from " + rse.hsa);
try {
- HBaseFsckRepair.closeRegionSilentlyAndWait(admin, rse.hsa, rse.hri);
+ HBaseFsckRepair.closeRegionSilentlyAndWait(connection, rse.hsa, rse.hri);
offline(rse.hri.getRegionName());
} catch (IOException ioe) {
LOG.warn("Got exception when attempting to offline region "
@@ -1847,7 +1847,7 @@ public class HBaseFsck extends Configured {
continue;
}
// close the region -- close files and remove assignment
- HBaseFsckRepair.closeRegionSilentlyAndWait(admin, serverName, hri);
+ HBaseFsckRepair.closeRegionSilentlyAndWait(connection, serverName, hri);
}
}
@@ -2042,7 +2042,7 @@ public class HBaseFsck extends Configured {
if (shouldFixAssignments()) {
errors.print("Trying to close the region " + descriptiveName);
setShouldRerun();
- HBaseFsckRepair.fixMultiAssignment(admin, hbi.metaEntry, hbi.deployedOn);
+ HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
}
} else if (inMeta && inHdfs && isMultiplyDeployed) {
errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName
@@ -2053,7 +2053,7 @@ public class HBaseFsck extends Configured {
if (shouldFixAssignments()) {
errors.print("Trying to fix assignment error...");
setShouldRerun();
- HBaseFsckRepair.fixMultiAssignment(admin, hbi.metaEntry, hbi.deployedOn);
+ HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
}
} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) {
errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region "
@@ -2064,7 +2064,7 @@ public class HBaseFsck extends Configured {
if (shouldFixAssignments()) {
errors.print("Trying to fix assignment error...");
setShouldRerun();
- HBaseFsckRepair.fixMultiAssignment(admin, hbi.metaEntry, hbi.deployedOn);
+ HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn);
HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI());
}
} else {
@@ -2973,7 +2973,7 @@ public class HBaseFsck extends Configured {
errors.print("Trying to fix a problem with hbase:meta..");
setShouldRerun();
// try fix it (treat is a dupe assignment)
- HBaseFsckRepair.fixMultiAssignment(admin, metaHbckInfo.metaEntry, servers);
+ HBaseFsckRepair.fixMultiAssignment(connection, metaHbckInfo.metaEntry, servers);
}
}
// rerun hbck with hopefully fixed META
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
index 56967f922e0..b5740b26b2c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
@@ -18,25 +18,20 @@
*/
package org.apache.hadoop.hbase.util;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
@@ -47,6 +42,12 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.zookeeper.KeeperException;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
/**
* This class contains helper methods that repair parts of hbase's filesystem
* contents.
@@ -60,22 +61,22 @@ public class HBaseFsckRepair {
* and then force ZK unassigned node to OFFLINE to trigger assignment by
* master.
*
- * @param admin HBase admin used to undeploy
+ * @param connection HBase connection to the cluster
* @param region Region to undeploy
* @param servers list of Servers to undeploy from
*/
- public static void fixMultiAssignment(HBaseAdmin admin, HRegionInfo region,
+ public static void fixMultiAssignment(HConnection connection, HRegionInfo region,
List servers)
throws IOException, KeeperException, InterruptedException {
HRegionInfo actualRegion = new HRegionInfo(region);
// Close region on the servers silently
for(ServerName server : servers) {
- closeRegionSilentlyAndWait(admin, server, actualRegion);
+ closeRegionSilentlyAndWait(connection, server, actualRegion);
}
// Force ZK node to OFFLINE so master assigns
- forceOfflineInZK(admin, actualRegion);
+ forceOfflineInZK(connection.getAdmin(), actualRegion);
}
/**
@@ -149,16 +150,15 @@ public class HBaseFsckRepair {
* (default 120s) to close the region. This bypasses the active hmaster.
*/
@SuppressWarnings("deprecation")
- public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
+ public static void closeRegionSilentlyAndWait(HConnection connection,
ServerName server, HRegionInfo region) throws IOException, InterruptedException {
- HConnection connection = admin.getConnection();
AdminService.BlockingInterface rs = connection.getAdmin(server);
try {
ProtobufUtil.closeRegion(rs, server, region.getRegionName());
} catch (IOException e) {
LOG.warn("Exception when closing region: " + region.getRegionNameAsString(), e);
}
- long timeout = admin.getConfiguration()
+ long timeout = connection.getConfiguration()
.getLong("hbase.hbck.close.timeout", 120000);
long expiration = timeout + System.currentTimeMillis();
while (System.currentTimeMillis() < expiration) {
@@ -180,7 +180,8 @@ public class HBaseFsckRepair {
*/
public static void fixMetaHoleOnlineAndAddReplicas(Configuration conf,
HRegionInfo hri, Collection servers, int numReplicas) throws IOException {
- Table meta = new HTable(conf, TableName.META_TABLE_NAME);
+ Connection conn = ConnectionFactory.createConnection(conf);
+ Table meta = conn.getTable(TableName.META_TABLE_NAME);
Put put = MetaTableAccessor.makePutFromRegionInfo(hri);
if (numReplicas > 1) {
Random r = new Random();
@@ -196,6 +197,7 @@ public class HBaseFsckRepair {
}
meta.put(put);
meta.close();
+ conn.close();
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 548c072d785..eadebd377a3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
@@ -66,10 +67,11 @@ public class TestMetaTableAccessor {
// responsive. 1 second is default as is ten retries.
c.setLong("hbase.client.pause", 1000);
c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10);
- connection = HConnectionManager.getConnection(c);
+ connection = ConnectionFactory.createConnection(c);
}
@AfterClass public static void afterClass() throws Exception {
+ connection.close();
UTIL.shutdownMiniCluster();
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index 916d81f557c..722bb207dc7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -21,14 +21,11 @@ package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
@@ -48,6 +45,12 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
/**
* Test whether region re-balancing works. (HBASE-71)
*/
@@ -98,7 +101,8 @@ public class TestRegionRebalancing {
@SuppressWarnings("deprecation")
public void testRebalanceOnRegionServerNumberChange()
throws IOException, InterruptedException {
- HBaseAdmin admin = new HBaseAdmin(UTIL.getConfiguration());
+ Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
+ Admin admin = connection.getAdmin();
admin.createTable(this.desc, Arrays.copyOfRange(HBaseTestingUtility.KEYS,
1, HBaseTestingUtility.KEYS.length));
this.table = new HTable(UTIL.getConfiguration(), this.desc.getTableName());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index 86c8e7ace4c..a99b0476b12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -137,6 +137,8 @@ public class HConnectionTestingUtility {
RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR));
HTableInterface t = Mockito.mock(HTableInterface.class);
Mockito.when(c.getTable((TableName)Mockito.any())).thenReturn(t);
+ ResultScanner rs = Mockito.mock(ResultScanner.class);
+ Mockito.when(t.getScanner((Scan)Mockito.any())).thenReturn(rs);
return c;
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index 43ee682dd42..fc06d150358 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -73,6 +75,9 @@ public class TestReplicationWithTags {
private static ReplicationAdmin replicationAdmin;
+ private static Connection connection1;
+ private static Connection connection2;
+
private static Table htable1;
private static Table htable2;
@@ -137,22 +142,13 @@ public class TestReplicationWithTags {
fam.setMaxVersions(3);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
table.addFamily(fam);
- Admin admin = null;
- try {
- admin = new HBaseAdmin(conf1);
+ try (Connection conn = ConnectionFactory.createConnection(conf1);
+ Admin admin = conn.getAdmin()) {
admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
- } finally {
- if (admin != null) {
- admin.close();
- }
}
- try {
- admin = new HBaseAdmin(conf2);
+ try (Connection conn = ConnectionFactory.createConnection(conf2);
+ Admin admin = conn.getAdmin()) {
admin.createTable(table, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
- } finally {
- if(admin != null){
- admin.close();
- }
}
htable1 = new HTable(conf1, TABLE_NAME);
htable1.setWriteBufferSize(1024);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index d4a7fdc6ed7..6f40551c578 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -271,12 +271,12 @@ public class TestHBaseFsck {
* This method is used to undeploy a region -- close it and attempt to
* remove its state from the Master.
*/
- private void undeployRegion(HBaseAdmin admin, ServerName sn,
+ private void undeployRegion(HConnection conn, ServerName sn,
HRegionInfo hri) throws IOException, InterruptedException {
try {
- HBaseFsckRepair.closeRegionSilentlyAndWait(admin, sn, hri);
+ HBaseFsckRepair.closeRegionSilentlyAndWait(conn, sn, hri);
if (!hri.isMetaTable()) {
- admin.offline(hri.getRegionName());
+ conn.getAdmin().offline(hri.getRegionName());
}
} catch (IOException ioe) {
LOG.warn("Got exception when attempting to offline region "
@@ -311,6 +311,7 @@ public class TestHBaseFsck {
dumpMeta(htd.getTableName());
Map hris = tbl.getRegionLocations();
+ HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
for (Entry e: hris.entrySet()) {
HRegionInfo hri = e.getKey();
ServerName hsa = e.getValue();
@@ -323,7 +324,7 @@ public class TestHBaseFsck {
if (unassign) {
LOG.info("Undeploying region " + hri + " from server " + hsa);
- undeployRegion(new HBaseAdmin(conf), hsa, hri);
+ undeployRegion(conn, hsa, hri);
}
if (regionInfoOnly) {
@@ -360,6 +361,7 @@ public class TestHBaseFsck {
TEST_UTIL.getMetaTableRows(htd.getTableName());
LOG.info("*** After delete:");
dumpMeta(htd.getTableName());
+ conn.close();
}
/**
@@ -418,8 +420,9 @@ public class TestHBaseFsck {
* @throws IOException
*/
void deleteTable(TableName tablename) throws IOException {
- HBaseAdmin admin = new HBaseAdmin(conf);
- admin.getConnection().clearRegionCache();
+ HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
+ Admin admin = conn.getAdmin();
+ conn.clearRegionCache();
if (admin.isTableEnabled(tablename)) {
admin.disableTableAsync(tablename);
}
@@ -439,6 +442,8 @@ public class TestHBaseFsck {
}
}
admin.deleteTable(tablename);
+ admin.close();
+ conn.close();
}
/**
@@ -899,7 +904,7 @@ public class TestHBaseFsck {
public void testSidelineOverlapRegion() throws Exception {
TableName table =
TableName.valueOf("testSidelineOverlapRegion");
- try {
+ try (HConnection conn = (HConnection) ConnectionFactory.createConnection(conf)){
setupTable(table);
assertEquals(ROWKEYS.length, countRows());
@@ -941,7 +946,7 @@ public class TestHBaseFsck {
}
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
- HBaseFsckRepair.closeRegionSilentlyAndWait(admin,
+ HBaseFsckRepair.closeRegionSilentlyAndWait(conn,
cluster.getRegionServer(k).getServerName(), hbi.getHdfsHRI());
admin.offline(regionName);
break;
@@ -950,7 +955,7 @@ public class TestHBaseFsck {
assertNotNull(regionName);
assertNotNull(serverName);
- Table meta = new HTable(conf, TableName.META_TABLE_NAME, executorService);
+ Table meta = conn.getTable(TableName.META_TABLE_NAME, executorService);
Put put = new Put(regionName);
put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
Bytes.toBytes(serverName.getHostAndPort()));
@@ -1635,8 +1640,8 @@ public class TestHBaseFsck {
public void testSplitDaughtersNotInMeta() throws Exception {
TableName table =
TableName.valueOf("testSplitdaughtersNotInMeta");
- Table meta = null;
- try {
+ try (HConnection conn = (HConnection) ConnectionFactory.createConnection(conf);
+ Table meta = conn.getTable(TableName.META_TABLE_NAME)){
setupTable(table);
assertEquals(ROWKEYS.length, countRows());
@@ -1644,13 +1649,11 @@ public class TestHBaseFsck {
TEST_UTIL.getHBaseAdmin().flush(table);
HRegionLocation location = tbl.getRegionLocation("B");
- meta = new HTable(conf, TableName.META_TABLE_NAME);
HRegionInfo hri = location.getRegionInfo();
// do a regular split
- HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
byte[] regionName = location.getRegionInfo().getRegionName();
- admin.splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
+ conn.getAdmin().splitRegion(location.getRegionInfo().getRegionName(), Bytes.toBytes("BM"));
TestEndToEndSplitTransaction.blockUntilRegionSplit(
TEST_UTIL.getConfiguration(), 60000, regionName, true);
@@ -1658,8 +1661,8 @@ public class TestHBaseFsck {
// Delete daughter regions from meta, but not hdfs, unassign it.
Map hris = tbl.getRegionLocations();
- undeployRegion(admin, hris.get(daughters.getFirst()), daughters.getFirst());
- undeployRegion(admin, hris.get(daughters.getSecond()), daughters.getSecond());
+ undeployRegion(conn, hris.get(daughters.getFirst()), daughters.getFirst());
+ undeployRegion(conn, hris.get(daughters.getSecond()), daughters.getSecond());
meta.delete(new Delete(daughters.getFirst().getRegionName()));
meta.delete(new Delete(daughters.getSecond().getRegionName()));
@@ -1693,7 +1696,6 @@ public class TestHBaseFsck {
assertNoErrors(doFsck(conf, false)); //should be fixed by now
} finally {
deleteTable(table);
- IOUtils.closeQuietly(meta);
}
}
@@ -2418,13 +2420,9 @@ public class TestHBaseFsck {
HRegionInfo hri = metaLocation.getRegionInfo();
if (unassign) {
LOG.info("Undeploying meta region " + hri + " from server " + hsa);
- Connection unmanagedConnection = ConnectionFactory.createConnection(conf);
- HBaseAdmin admin = (HBaseAdmin) unmanagedConnection.getAdmin();
- try {
- undeployRegion(admin, hsa, hri);
- } finally {
- admin.close();
- unmanagedConnection.close();
+ try (HConnection unmanagedConnection =
+ (HConnection) ConnectionFactory.createConnection(conf)) {
+ undeployRegion(unmanagedConnection, hsa, hri);
}
}