HBASE-12029 Use Table and RegionLocator in HTable.getRegionLocations() (Solomon Duskis)

Conflicts:
	hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
	hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
This commit is contained in:
stack 2014-10-13 12:24:27 -07:00
parent d817dab0c3
commit 12ac08ce44
13 changed files with 163 additions and 90 deletions

View File

@ -1004,7 +1004,7 @@ class ConnectionManager {
public List<HRegionLocation> locateRegions(final TableName tableName, public List<HRegionLocation> locateRegions(final TableName tableName,
final boolean useCache, final boolean offlined) throws IOException { final boolean useCache, final boolean offlined) throws IOException {
NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this, NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, this,
tableName, offlined); tableName);
final List<HRegionLocation> locations = new ArrayList<HRegionLocation>(); final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
for (HRegionInfo regionInfo : regions.keySet()) { for (HRegionInfo regionInfo : regions.keySet()) {
RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true); RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);

View File

@ -25,6 +25,7 @@ import java.util.Collections;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Map.Entry;
import java.util.NavigableMap; import java.util.NavigableMap;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.concurrent.Callable; import java.util.concurrent.Callable;
@ -614,12 +615,29 @@ public class HTable implements HTableInterface, RegionLocator {
* This is mainly useful for the MapReduce integration. * This is mainly useful for the MapReduce integration.
* @return A map of HRegionInfo with its server address * @return A map of HRegionInfo with its server address
* @throws IOException if a remote or network exception occurs * @throws IOException if a remote or network exception occurs
* @deprecated This is no longer a public API * @deprecated This is no longer a public API. Use {@link #getAllRegionLocations()} instead.
*/ */
@Deprecated @Deprecated
public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException { public NavigableMap<HRegionInfo, ServerName> getRegionLocations() throws IOException {
// TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocator, singular, returns an HRegionLocation. // TODO: Odd that this returns a Map of HRI to SN whereas getRegionLocator, singular, returns an HRegionLocation.
return MetaScanner.allTableRegions(getConfiguration(), this.connection, getName(), false); return MetaScanner.allTableRegions(getConfiguration(), this.connection, getName());
}
/**
* Gets all the regions and their address for this table.
* <p>
* This is mainly useful for the MapReduce integration.
* @return A map of HRegionInfo with its server address
* @throws IOException if a remote or network exception occurs
*/
@Override
public List<HRegionLocation> getAllRegionLocations() throws IOException {
NavigableMap<HRegionInfo, ServerName> locations = getRegionLocations();
ArrayList<HRegionLocation> regions = new ArrayList<>(locations.size());
for (Entry<HRegionInfo, ServerName> entry : locations.entrySet()) {
regions.add(new HRegionLocation(entry.getKey(), entry.getValue()));
}
return regions;
} }
/** /**

View File

@ -132,8 +132,10 @@ public class MetaScanner {
final byte[] row, final int rowLimit, final TableName metaTableName) final byte[] row, final int rowLimit, final TableName metaTableName)
throws IOException { throws IOException {
boolean closeConnection = false;
if (connection == null){ if (connection == null){
connection = ConnectionManager.getConnectionInternal(configuration); connection = ConnectionFactory.createConnection(configuration);
closeConnection = true;
} }
int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE; int rowUpperLimit = rowLimit > 0 ? rowLimit: Integer.MAX_VALUE;
@ -217,7 +219,9 @@ public class MetaScanner {
LOG.debug("Got exception in closing meta table", t); LOG.debug("Got exception in closing meta table", t);
} }
} }
if (closeConnection) {
connection.close();
}
} }
} }
@ -269,6 +273,21 @@ public class MetaScanner {
return regions; return regions;
} }
/**
* Lists all of the table regions currently in META.
* @param conf
* @param offlined True if we are to include offlined regions, false and we'll
* leave out offlined regions from returned list.
* @return Map of all user-space regions to servers
* @throws IOException
* @deprecated Use {@link #allTableRegions(Configuration, Connection, TableName)} instead
*/
@Deprecated
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf,
Connection connection, final TableName tableName, boolean offlined) throws IOException {
return allTableRegions(conf, connection, tableName);
}
/** /**
* Lists all of the table regions currently in META. * Lists all of the table regions currently in META.
* @param conf * @param conf
@ -278,8 +297,7 @@ public class MetaScanner {
* @throws IOException * @throws IOException
*/ */
public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf, public static NavigableMap<HRegionInfo, ServerName> allTableRegions(Configuration conf,
Connection connection, final TableName tableName, Connection connection, final TableName tableName) throws IOException {
final boolean offlined) throws IOException {
final NavigableMap<HRegionInfo, ServerName> regions = final NavigableMap<HRegionInfo, ServerName> regions =
new TreeMap<HRegionInfo, ServerName>(); new TreeMap<HRegionInfo, ServerName>();
MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) { MetaScannerVisitor visitor = new TableMetaScannerVisitor(tableName) {

View File

@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.util.Pair;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.List;
/** /**
* Used to view region location information for a single HBase table. * Used to view region location information for a single HBase table.
@ -57,6 +58,14 @@ public interface RegionLocator extends Closeable {
public HRegionLocation getRegionLocation(final byte [] row, boolean reload) public HRegionLocation getRegionLocation(final byte [] row, boolean reload)
throws IOException; throws IOException;
/**
* Retrieves all of the regions associated with this table.
* @return a {@link List} of all regions associated with this table.
* @throws IOException if a remote or network exception occurs
*/
public List<HRegionLocation> getAllRegionLocations()
throws IOException;
/** /**
* Gets the starting row key for every region in the currently open table. * Gets the starting row key for every region in the currently open table.
* <p> * <p>

View File

@ -77,7 +77,7 @@ public class RegionsResource extends ResourceBase {
TableName tableName = TableName.valueOf(tableResource.getName()); TableName tableName = TableName.valueOf(tableResource.getName());
TableInfoModel model = new TableInfoModel(tableName.getNameAsString()); TableInfoModel model = new TableInfoModel(tableName.getNameAsString());
Map<HRegionInfo,ServerName> regions = MetaScanner.allTableRegions( Map<HRegionInfo,ServerName> regions = MetaScanner.allTableRegions(
servlet.getConfiguration(), null, tableName, false); servlet.getConfiguration(), null, tableName);
for (Map.Entry<HRegionInfo,ServerName> e: regions.entrySet()) { for (Map.Entry<HRegionInfo,ServerName> e: regions.entrySet()) {
HRegionInfo hri = e.getKey(); HRegionInfo hri = e.getKey();
ServerName addr = e.getValue(); ServerName addr = e.getValue();

View File

@ -157,7 +157,8 @@ public abstract class MultiTableInputFormatBase extends
byte[] startRow = scan.getStartRow(); byte[] startRow = scan.getStartRow();
byte[] stopRow = scan.getStopRow(); byte[] stopRow = scan.getStopRow();
RegionSizeCalculator sizeCalculator = new RegionSizeCalculator((HTable) table); RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(
regionLocator, conn.getAdmin());
for (int i = 0; i < keys.getFirst().length; i++) { for (int i = 0; i < keys.getFirst().length; i++) {
if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) { if (!includeRegionInSplit(keys.getFirst()[i], keys.getSecond()[i])) {
@ -184,7 +185,7 @@ public abstract class MultiTableInputFormatBase extends
long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName()); long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName());
TableSplit split = TableSplit split =
new TableSplit(table.getName(), new TableSplit(regionLocator.getName(),
scan, splitStart, splitStop, regionHostname, regionSize); scan, splitStart, splitStop, regionHostname, regionSize);
splits.add(split); splits.add(split);

View File

@ -109,6 +109,8 @@ implements Configurable {
this.conf = configuration; this.conf = configuration;
TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE)); TableName tableName = TableName.valueOf(conf.get(INPUT_TABLE));
try { try {
// TODO: Replace setHTable() with initializeTable() once we have
// a clean method of closing a connection.
setHTable(new HTable(new Configuration(conf), tableName)); setHTable(new HTable(new Configuration(conf), tableName));
} catch (Exception e) { } catch (Exception e) {
LOG.error(StringUtils.stringifyException(e)); LOG.error(StringUtils.stringifyException(e));

View File

@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionLocator;
@ -286,14 +287,27 @@ extends InputFormat<ImmutableBytesWritable, Result> {
* Allows subclasses to set the {@link HTable}. * Allows subclasses to set the {@link HTable}.
* *
* @param table The table to get the data from. * @param table The table to get the data from.
* @throws IOException
* @deprecated Use {@link #initializeTable(Connection, TableName)} instead. * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
*/ */
@Deprecated @Deprecated
protected void setHTable(HTable table) { protected void setHTable(HTable table) throws IOException {
this.table = table; this.table = table;
this.regionLocator = table; this.regionLocator = table;
} }
/**
* Allows subclasses to initialize the table information.
*
* @param connection The {@link Connection} to the HBase cluster.
* @param tableName The {@link TableName} of the table to process.
* @throws IOException
*/
protected void initializeTable(Connection connection, TableName tableName) throws IOException {
this.table = connection.getTable(tableName);
this.regionLocator = connection.getRegionLocator(tableName);
}
/** /**
* Gets the scan defining the actual details like columns etc. * Gets the scan defining the actual details like columns etc.
* *

View File

@ -17,29 +17,31 @@
*/ */
package org.apache.hadoop.hbase.util; package org.apache.hadoop.hbase.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.TreeSet; import java.util.TreeSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
/** /**
* Computes size of each region for given table and given column families. * Computes size of each region for given table and given column families.
* The value is used by MapReduce for better scheduling. * The value is used by MapReduce for better scheduling.
@ -59,57 +61,65 @@ public class RegionSizeCalculator {
/** /**
* Computes size of each region for table and given column families. * Computes size of each region for table and given column families.
* */ *
* @deprecated Use {@link #RegionSizeCalculator(RegionLocator, Admin)} instead.
*/
@Deprecated
public RegionSizeCalculator(HTable table) throws IOException { public RegionSizeCalculator(HTable table) throws IOException {
this(table, new HBaseAdmin(table.getConfiguration())); HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
}
/** ctor for unit testing */
RegionSizeCalculator (HTable table, Admin admin) throws IOException {
try { try {
if (!enabled(table.getConfiguration())) { init(table, admin);
LOG.info("Region size calculation disabled.");
return;
}
LOG.info("Calculating region sizes for table \"" + new String(table.getTableName()) + "\".");
//get regions for table
Set<HRegionInfo> tableRegionInfos = table.getRegionLocations().keySet();
Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
for (HRegionInfo regionInfo : tableRegionInfos) {
tableRegions.add(regionInfo.getRegionName());
}
ClusterStatus clusterStatus = admin.getClusterStatus();
Collection<ServerName> servers = clusterStatus.getServers();
final long megaByte = 1024L * 1024L;
//iterate all cluster regions, filter regions from our table and compute their size
for (ServerName serverName: servers) {
ServerLoad serverLoad = clusterStatus.getLoad(serverName);
for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
byte[] regionId = regionLoad.getName();
if (tableRegions.contains(regionId)) {
long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
sizeMap.put(regionId, regionSizeBytes);
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
}
}
}
}
LOG.debug("Region sizes calculated");
} finally { } finally {
admin.close(); admin.close();
} }
}
/**
* Computes size of each region for table and given column families.
* */
public RegionSizeCalculator(RegionLocator regionLocator, Admin admin) throws IOException {
init(regionLocator, admin);
}
private void init(RegionLocator regionLocator, Admin admin)
throws IOException {
if (!enabled(admin.getConfiguration())) {
LOG.info("Region size calculation disabled.");
return;
}
LOG.info("Calculating region sizes for table \"" + regionLocator.getName() + "\".");
//get regions for table
List<HRegionLocation> tableRegionInfos = regionLocator.getAllRegionLocations();
Set<byte[]> tableRegions = new TreeSet<byte[]>(Bytes.BYTES_COMPARATOR);
for (HRegionLocation regionInfo : tableRegionInfos) {
tableRegions.add(regionInfo.getRegionInfo().getRegionName());
}
ClusterStatus clusterStatus = admin.getClusterStatus();
Collection<ServerName> servers = clusterStatus.getServers();
final long megaByte = 1024L * 1024L;
//iterate all cluster regions, filter regions from our table and compute their size
for (ServerName serverName: servers) {
ServerLoad serverLoad = clusterStatus.getLoad(serverName);
for (RegionLoad regionLoad: serverLoad.getRegionsLoad().values()) {
byte[] regionId = regionLoad.getName();
if (tableRegions.contains(regionId)) {
long regionSizeBytes = regionLoad.getStorefileSizeMB() * megaByte;
sizeMap.put(regionId, regionSizeBytes);
if (LOG.isDebugEnabled()) {
LOG.debug("Region " + regionLoad.getNameAsString() + " has size " + regionSizeBytes);
}
}
}
}
LOG.debug("Region sizes calculated");
} }
boolean enabled(Configuration configuration) { boolean enabled(Configuration configuration) {

View File

@ -210,8 +210,7 @@ public class TestMultiVersions {
NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations(); NavigableMap<HRegionInfo, ServerName> locations = table.getRegionLocations();
assertEquals(2, locations.size()); assertEquals(2, locations.size());
int index = 0; int index = 0;
for (Map.Entry<HRegionInfo, ServerName> e: locations.entrySet()) { for (HRegionInfo hri: locations.keySet()) {
HRegionInfo hri = e.getKey();
if (index == 0) { if (index == 0) {
assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey())); assertTrue(Bytes.equals(HConstants.EMPTY_START_ROW, hri.getStartKey()));
assertTrue(Bytes.equals(hri.getEndKey(), splitRows[0])); assertTrue(Bytes.equals(hri.getEndKey(), splitRows[0]));

View File

@ -189,7 +189,7 @@ public class TestMetaScanner {
while(!isStopped()) { while(!isStopped()) {
try { try {
NavigableMap<HRegionInfo, ServerName> regions = NavigableMap<HRegionInfo, ServerName> regions =
MetaScanner.allTableRegions(TEST_UTIL.getConfiguration(), null, TABLENAME, false); MetaScanner.allTableRegions(TEST_UTIL.getConfiguration(), null, TABLENAME);
LOG.info("-------"); LOG.info("-------");
byte[] lastEndKey = HConstants.EMPTY_START_ROW; byte[] lastEndKey = HConstants.EMPTY_START_ROW;

View File

@ -253,7 +253,7 @@ public class TestEndToEndSplitTransaction {
Random random = new Random(); Random random = new Random();
for (int i= 0; i< 5; i++) { for (int i= 0; i< 5; i++) {
NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, null, NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, null,
tableName, false); tableName);
if (regions.size() == 0) { if (regions.size() == 0) {
continue; continue;
} }
@ -326,7 +326,7 @@ public class TestEndToEndSplitTransaction {
//MetaScanner.allTableRegions() //MetaScanner.allTableRegions()
NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, null, NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(conf, null,
tableName, false); tableName);
verifyTableRegions(regions.keySet()); verifyTableRegions(regions.keySet());
//MetaScanner.listAllRegions() //MetaScanner.listAllRegions()

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLoad; import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad; import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.ServerName;
@ -27,6 +28,8 @@ import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
import org.mockito.Mockito; import org.mockito.Mockito;
@ -35,7 +38,6 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap; import java.util.TreeMap;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
@ -51,7 +53,7 @@ public class TestRegionSizeCalculator {
@Test @Test
public void testSimpleTestCase() throws Exception { public void testSimpleTestCase() throws Exception {
HTable table = mockTable("region1", "region2", "region3"); RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3");
Admin admin = mockAdmin( Admin admin = mockAdmin(
mockServer( mockServer(
@ -64,7 +66,7 @@ public class TestRegionSizeCalculator {
) )
); );
RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes())); assertEquals(123 * megabyte, calculator.getRegionSize("region1".getBytes()));
assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes())); assertEquals(54321 * megabyte, calculator.getRegionSize("region2".getBytes()));
@ -83,7 +85,7 @@ public class TestRegionSizeCalculator {
@Test @Test
public void testLargeRegion() throws Exception { public void testLargeRegion() throws Exception {
HTable table = mockTable("largeRegion"); RegionLocator regionLocator = mockRegionLocator("largeRegion");
Admin admin = mockAdmin( Admin admin = mockAdmin(
mockServer( mockServer(
@ -91,7 +93,7 @@ public class TestRegionSizeCalculator {
) )
); );
RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin);
assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes())); assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes()));
} }
@ -100,7 +102,7 @@ public class TestRegionSizeCalculator {
@Test @Test
public void testDisabled() throws Exception { public void testDisabled() throws Exception {
String regionName = "cz.goout:/index.html"; String regionName = "cz.goout:/index.html";
HTable table = mockTable(regionName); RegionLocator table = mockRegionLocator(regionName);
Admin admin = mockAdmin( Admin admin = mockAdmin(
mockServer( mockServer(
@ -123,29 +125,29 @@ public class TestRegionSizeCalculator {
/** /**
* Makes some table with given region names. * Makes some table with given region names.
* */ * */
private HTable mockTable(String... regionNames) throws IOException { private RegionLocator mockRegionLocator(String... regionNames) throws IOException {
HTable mockedTable = Mockito.mock(HTable.class); RegionLocator mockedTable = Mockito.mock(RegionLocator.class);
when(mockedTable.getConfiguration()).thenReturn(configuration); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable"));
when(mockedTable.getTableName()).thenReturn("sizeTestTable".getBytes()); List<HRegionLocation> regionLocations = new ArrayList<>();
NavigableMap<HRegionInfo, ServerName> regionLocations = new TreeMap<HRegionInfo, ServerName>(); when(mockedTable.getAllRegionLocations()).thenReturn(regionLocations);
when(mockedTable.getRegionLocations()).thenReturn(regionLocations);
for (String regionName : regionNames) { for (String regionName : regionNames) {
HRegionInfo info = Mockito.mock(HRegionInfo.class); HRegionInfo info = Mockito.mock(HRegionInfo.class);
when(info.getRegionName()).thenReturn(regionName.getBytes()); when(info.getRegionName()).thenReturn(regionName.getBytes());
regionLocations.put(info, null);//we are not interested in values regionLocations.add(new HRegionLocation(info, null));//we are not interested in values
} }
return mockedTable; return mockedTable;
} }
/** /**
* Creates mock returing ClusterStatus info about given servers. * Creates mock returning ClusterStatus info about given servers.
*/ */
private Admin mockAdmin(ServerLoad... servers) throws Exception { private Admin mockAdmin(ServerLoad... servers) throws Exception {
//get clusterstatus //get clusterstatus
Admin mockAdmin = Mockito.mock(HBaseAdmin.class); Admin mockAdmin = Mockito.mock(Admin.class);
ClusterStatus clusterStatus = mockCluster(servers); ClusterStatus clusterStatus = mockCluster(servers);
when(mockAdmin.getConfiguration()).thenReturn(configuration);
when(mockAdmin.getClusterStatus()).thenReturn(clusterStatus); when(mockAdmin.getClusterStatus()).thenReturn(clusterStatus);
return mockAdmin; return mockAdmin;
} }