HBASE-7541 Convert all tests that use HBaseTestingUtility.createMultiRegions to HBA.createTable (Jonathan Lawlor)

stack 2015-01-13 22:10:12 -08:00
parent 1723245282
commit 608025ae67
28 changed files with 369 additions and 334 deletions
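
The recurring conversion in the test diffs below is roughly the following (a sketch only; TEST_UTIL, TABLE_NAME and FAMILY stand in for each test's own fields):

// before: create the table, then rewrite hbase:meta by hand
HTable table = TEST_UTIL.createTable(TABLE_NAME, FAMILY);
int regionCount = TEST_UTIL.createMultiRegions(table, FAMILY);

// after: create the table pre-split, and read the count back from its locator
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
int regionCount = -1;
try (RegionLocator locator = table.getRegionLocator()) {
  regionCount = locator.getStartKeys().length;  // one start key per region
}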

View File

@ -17,6 +17,34 @@
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -94,34 +122,6 @@ import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.ZooKeeper.States;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Facility for testing HBase. Replacement for
* old HBaseTestCase and HBaseClusterTestCase functionality.
@ -1243,6 +1243,24 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return createTable(tableName, new byte[][]{family});
}
/**
* Create a table with multiple regions.
* @param tableName
* @param family
* @param numRegions
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte[] startKey = Bytes.toBytes("aaaaa");
byte[] endKey = Bytes.toBytes("zzzzz");
byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
return createTable(tableName, new byte[][] { family }, splitKeys);
}
/**
* Create a table.
@ -1261,13 +1279,36 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* Create a table.
* @param tableName
* @param families
* @return An HT
* able instance for the created table.
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createTable(TableName tableName, byte[][] families)
throws IOException {
return createTable(tableName, families, new Configuration(getConfiguration()));
return createTable(tableName, families, (byte[][]) null);
}
/**
* Create a table with multiple regions.
* @param tableName
* @param families
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createMultiRegionTable(TableName tableName, byte[][] families) throws IOException {
return createTable(tableName, families, KEYS_FOR_HBA_CREATE_TABLE);
}
/**
* Create a table.
* @param tableName
* @param families
* @param splitKeys
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys)
throws IOException {
return createTable(tableName, families, splitKeys, new Configuration(getConfiguration()));
}
public HTable createTable(byte[] tableName, byte[][] families,
@ -1307,7 +1348,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
*/
public HTable createTable(HTableDescriptor htd, byte[][] families, Configuration c)
throws IOException {
for(byte[] family : families) {
return createTable(htd, families, (byte[][]) null, c);
}
/**
* Create a table.
* @param htd
* @param families
* @param splitKeys
* @param c Configuration to use
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createTable(HTableDescriptor htd, byte[][] families, byte[][] splitKeys,
Configuration c) throws IOException {
for (byte[] family : families) {
HColumnDescriptor hcd = new HColumnDescriptor(family);
// Disable blooms (they are on by default as of 0.95) but we disable them here because
// tests have hard coded counts of what to expect in block cache, etc., and blooms being
@ -1315,10 +1370,11 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
hcd.setBloomFilterType(BloomType.NONE);
htd.addFamily(hcd);
}
getHBaseAdmin().createTable(htd);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
getHBaseAdmin().createTable(htd, splitKeys);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are
// assigned
waitUntilAllRegionsAssigned(htd.getTableName());
return (HTable)getConnection().getTable(htd.getTableName());
return (HTable) getConnection().getTable(htd.getTableName());
}
/**
@ -1347,7 +1403,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
public HTable createTable(TableName tableName, byte[][] families,
final Configuration c)
throws IOException {
return createTable(new HTableDescriptor(tableName), families, c);
return createTable(tableName, families, (byte[][]) null, c);
}
/**
* Create a table.
* @param tableName
* @param families
* @param splitKeys
* @param c Configuration to use
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createTable(TableName tableName, byte[][] families, byte[][] splitKeys,
final Configuration c) throws IOException {
return createTable(new HTableDescriptor(tableName), families, splitKeys, c);
}
/**
@ -1471,15 +1541,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
public HTable createTable(TableName tableName, byte[][] families,
int numVersions)
throws IOException {
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : families) {
HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
desc.addFamily(hcd);
}
getHBaseAdmin().createTable(desc);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
waitUntilAllRegionsAssigned(tableName);
return (HTable) getConnection().getTable(tableName);
return createTable(tableName, families, numVersions, (byte[][]) null);
}
/**
@ -1487,6 +1549,42 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @param tableName
* @param families
* @param numVersions
* @param splitKeys
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createTable(TableName tableName, byte[][] families, int numVersions,
byte[][] splitKeys) throws IOException {
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : families) {
HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
desc.addFamily(hcd);
}
getHBaseAdmin().createTable(desc, splitKeys);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait until they are assigned
waitUntilAllRegionsAssigned(tableName);
return (HTable) getConnection().getTable(tableName);
}
/**
* Create a table with multiple regions.
* @param tableName
* @param families
* @param numVersions
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createMultiRegionTable(TableName tableName, byte[][] families, int numVersions)
throws IOException {
return createTable(tableName, families, numVersions, KEYS_FOR_HBA_CREATE_TABLE);
}
/**
* Create a table.
* @param tableName
* @param families
* @param numVersions
* @param blockSize
* @return An HTable instance for the created table.
* @throws IOException
*/
@ -1501,6 +1599,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
* @param tableName
* @param families
* @param numVersions
* @param blockSize
* @return An HTable instance for the created table.
* @throws IOException
*/
@ -1590,6 +1689,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return (HTable) getConnection().getTable(tableName);
}
/**
* Create a table with multiple regions.
* @param tableName
* @param family
* @return An HTable instance for the created table.
* @throws IOException
*/
public HTable createMultiRegionTable(TableName tableName, byte[] family) throws IOException {
return createTable(tableName, family, KEYS_FOR_HBA_CREATE_TABLE);
}
/**
* Create a table.
* @param tableName
@ -2122,19 +2232,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
return digest.toString();
}
/**
* Creates many regions names "aaa" to "zzz".
*
* @param table The table to use for the data.
* @param columnFamily The family to insert the data into.
* @return count of regions created.
* @throws IOException When creating the regions fails.
*/
public int createMultiRegions(HTable table, byte[] columnFamily)
throws IOException {
return createMultiRegions(getConfiguration(), table, columnFamily);
}
/** All the row values for the data loaded by {@link #loadTable(HTable, byte[])} */
public static final byte[][] ROWS = new byte[(int) Math.pow('z' - 'a' + 1, 3)][3]; // ~52KB
static {
@ -2175,97 +2272,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
Bytes.toBytes("xxx"), Bytes.toBytes("yyy"), Bytes.toBytes("zzz")
};
/**
* Creates many regions names "aaa" to "zzz".
* @param c Configuration to use.
* @param table The table to use for the data.
* @param columnFamily The family to insert the data into.
* @return count of regions created.
* @throws IOException When creating the regions fails.
*/
public int createMultiRegions(final Configuration c, final HTable table,
final byte[] columnFamily)
throws IOException {
return createMultiRegions(c, table, columnFamily, KEYS);
}
/**
* Creates the specified number of regions in the specified table.
* @param c
* @param table
* @param family
* @param numRegions
* @return
* @throws IOException
*/
public int createMultiRegions(final Configuration c, final HTable table,
final byte [] family, int numRegions)
throws IOException {
if (numRegions < 3) throw new IOException("Must create at least 3 regions");
byte [] startKey = Bytes.toBytes("aaaaa");
byte [] endKey = Bytes.toBytes("zzzzz");
byte [][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3);
byte [][] regionStartKeys = new byte[splitKeys.length+1][];
System.arraycopy(splitKeys, 0, regionStartKeys, 1, splitKeys.length);
regionStartKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
return createMultiRegions(c, table, family, regionStartKeys);
}
public int createMultiRegions(final Configuration c, final HTable table,
final byte[] columnFamily, byte [][] startKeys)
throws IOException {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
try (Table meta = new HTable(c, TableName.META_TABLE_NAME)) {
HTableDescriptor htd = table.getTableDescriptor();
if(!htd.hasFamily(columnFamily)) {
HColumnDescriptor hcd = new HColumnDescriptor(columnFamily);
htd.addFamily(hcd);
}
// remove empty region - this is tricky as the mini cluster during the test
// setup already has the "<tablename>,,123456789" row with an empty start
// and end key. Adding the custom regions below adds those blindly,
// including the new start region from empty to "bbb". lg
List<byte[]> rows = getMetaTableRows(htd.getTableName());
String regionToDeleteInFS = table
.getRegionsInRange(Bytes.toBytes(""), Bytes.toBytes("")).get(0)
.getRegionInfo().getEncodedName();
List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
// add custom ones
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(table.getName(),
startKeys[i], startKeys[j]);
MetaTableAccessor.addRegionToMeta(meta, hri);
newRegions.add(hri);
count++;
}
// see comment above, remove "old" (or previous) single region
for (byte[] row : rows) {
LOG.info("createMultiRegions: deleting meta row -> " +
Bytes.toStringBinary(row));
meta.delete(new Delete(row));
}
// remove the "old" region from FS
Path tableDir = new Path(getDefaultRootDirPath().toString()
+ System.getProperty("file.separator") + htd.getTableName()
+ System.getProperty("file.separator") + regionToDeleteInFS);
FileSystem.get(c).delete(tableDir, true);
// flush cache of regions
HConnection conn = table.getConnection();
conn.clearRegionCache();
// assign all the new regions IF table is enabled.
Admin admin = conn.getAdmin();
if (admin.isTableEnabled(table.getName())) {
for(HRegionInfo hri : newRegions) {
admin.assign(hri.getRegionName());
}
}
return count;
}
}
/**
* Create rows in hbase:meta for regions of the specified table with the specified
* start keys. The first startKey should be a 0 length byte array if you
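
A rough sketch of the new HBaseTestingUtility entry points added above (illustrative only; assumes a running mini cluster and a caller-defined FAMILY byte[]):

// pre-split with the utility's default split points
HTable t1 = TEST_UTIL.createMultiRegionTable(TableName.valueOf("t1"), FAMILY);
// pre-split into an explicit number of regions (must be at least 3)
HTable t2 = TEST_UTIL.createMultiRegionTable(TableName.valueOf("t2"), FAMILY, 10);
// pre-split at caller-supplied keys
HTable t3 = TEST_UTIL.createTable(TableName.valueOf("t3"), new byte[][] { FAMILY },
    new byte[][] { Bytes.toBytes("m") });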

View File

@ -91,10 +91,7 @@ public class TestFullLogReconstruction {
*/
@Test (timeout=300000)
public void testReconstruction() throws Exception {
HTable table = TEST_UTIL.createTable(TABLE_NAME, FAMILY);
TEST_UTIL.createMultiRegions(table, Bytes.toBytes("family"));
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
// Load up the table with simple rows and count them
int initialCount = TEST_UTIL.loadTable(table, FAMILY);

View File

@ -18,18 +18,21 @@
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -73,9 +76,11 @@ public class TestGlobalMemStoreSize {
byte [] table = Bytes.toBytes("TestGlobalMemStoreSize");
byte [] family = Bytes.toBytes("family");
LOG.info("Creating table with " + regionNum + " regions");
HTable ht = TEST_UTIL.createTable(TableName.valueOf(table), family);
int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family,
regionNum);
HTable ht = TEST_UTIL.createMultiRegionTable(TableName.valueOf(table), family, regionNum);
int numRegions = -1;
try (RegionLocator r = ht.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
assertEquals(regionNum,numRegions);
waitForAllRegionsAssigned();
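
Why the assertEquals above still holds: createMultiRegionTable(tableName, family, n) passes Bytes.split("aaaaa", "zzzzz", n - 3) as the split keys, and, as the removed createMultiRegions helper earlier in this commit also relied on, that call returns the n - 3 intermediate keys plus both endpoints, i.e. n - 1 split keys, so the table comes up with n regions and RegionLocator.getStartKeys() reports n start keys. Illustration for n = 4:

// Bytes.split("aaaaa", "zzzzz", 1) -> { "aaaaa", <mid>, "zzzzz" }   (3 split keys)
// regions: ["", "aaaaa") ["aaaaa", <mid>) [<mid>, "zzzzz") ["zzzzz", "")   (4 regions)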

View File

@ -33,9 +33,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -87,8 +87,11 @@ public class TestMetaTableAccessor {
final TableName name =
TableName.valueOf("testRetrying");
LOG.info("Started " + name);
HTable t = UTIL.createTable(name, HConstants.CATALOG_FAMILY);
int regionCount = UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
HTable t = UTIL.createMultiRegionTable(name, HConstants.CATALOG_FAMILY);
int regionCount = -1;
try (RegionLocator r = t.getRegionLocator()) {
regionCount = r.getStartKeys().length;
}
// Test it works getting a region from just made user table.
final List<HRegionInfo> regions =
testGettingTableRegions(connection, name, regionCount);

View File

@ -716,8 +716,7 @@ public class TestAdmin2 {
final TableName tableName = TableName.valueOf("testGetRegion");
LOG.info("Started " + tableName);
HTable t = TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY);
TEST_UTIL.createMultiRegions(t, HConstants.CATALOG_FAMILY);
HTable t = TEST_UTIL.createMultiRegionTable(tableName, HConstants.CATALOG_FAMILY);
HRegionLocation regionLocation = t.getRegionLocation("mmm");
HRegionInfo region = regionLocation.getRegionInfo();

View File

@ -27,6 +27,22 @@ import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -89,23 +105,6 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
/**
* Run tests that use the HBase clients; {@link HTable}.
* Sets up the HBase mini cluster once at start and runs through all client tests.
@ -4985,17 +4984,18 @@ public class TestFromClientSide {
// Set up test table:
// Create table:
HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILY);
// Create multiple regions for this table
int numOfRegions = TEST_UTIL.createMultiRegions(ht, FAMILY);
// Create 3 rows in the table, with rowkeys starting with "z*" so that
HTable ht = TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILY);
int numOfRegions = -1;
try (RegionLocator r = ht.getRegionLocator()) {
numOfRegions = r.getStartKeys().length;
}
// Create 3 rows in the table, with rowkeys starting with "zzz*" so that
// scans are forced to hit all the regions.
Put put1 = new Put(Bytes.toBytes("z1"));
Put put1 = new Put(Bytes.toBytes("zzz1"));
put1.add(FAMILY, QUALIFIER, VALUE);
Put put2 = new Put(Bytes.toBytes("z2"));
Put put2 = new Put(Bytes.toBytes("zzz2"));
put2.add(FAMILY, QUALIFIER, VALUE);
Put put3 = new Put(Bytes.toBytes("z3"));
Put put3 = new Put(Bytes.toBytes("zzz3"));
put3.add(FAMILY, QUALIFIER, VALUE);
ht.put(Arrays.asList(put1, put2, put3));
@ -5245,9 +5245,12 @@ public class TestFromClientSide {
byte [] startKey = Bytes.toBytes("ddc");
byte [] endKey = Bytes.toBytes("mmm");
TableName TABLE = TableName.valueOf("testGetRegionsInRange");
HTable table = TEST_UTIL.createTable(TABLE, new byte[][] {FAMILY}, 10);
int numOfRegions = TEST_UTIL.createMultiRegions(table, FAMILY);
assertEquals(25, numOfRegions);
HTable table = TEST_UTIL.createMultiRegionTable(TABLE, new byte[][] { FAMILY }, 10);
int numOfRegions = -1;
try (RegionLocator r = table.getRegionLocator()) {
numOfRegions = r.getStartKeys().length;
}
assertEquals(26, numOfRegions);
// Get the regions in this range
List<HRegionLocation> regionsList = table.getRegionsInRange(startKey,
@ -5270,22 +5273,22 @@ public class TestFromClientSide {
// Empty end key
regionsList = table.getRegionsInRange(startKey, HConstants.EMPTY_END_ROW);
assertEquals(20, regionsList.size());
assertEquals(21, regionsList.size());
// Both start and end keys empty
regionsList = table.getRegionsInRange(HConstants.EMPTY_START_ROW,
HConstants.EMPTY_END_ROW);
assertEquals(25, regionsList.size());
assertEquals(26, regionsList.size());
// Change the end key to somewhere in the last block
endKey = Bytes.toBytes("yyz");
endKey = Bytes.toBytes("zzz1");
regionsList = table.getRegionsInRange(startKey, endKey);
assertEquals(20, regionsList.size());
assertEquals(21, regionsList.size());
// Change the start key to somewhere in the first block
startKey = Bytes.toBytes("aac");
regionsList = table.getRegionsInRange(startKey, endKey);
assertEquals(25, regionsList.size());
assertEquals(26, regionsList.size());
// Make start and end key the same
startKey = endKey = Bytes.toBytes("ccc");

View File

@ -52,8 +52,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.FlakeyTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@ -71,6 +69,8 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.testclassification.FlakeyTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
@ -601,13 +601,12 @@ public class TestHCM {
*/
@Test
public void testRegionCaching() throws Exception{
TEST_UTIL.createTable(TABLE_NAME, FAM_NAM).close();
TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAM_NAM).close();
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
Connection connection = ConnectionFactory.createConnection(conf);
final HTable table = (HTable) connection.getTable(TABLE_NAME);
TEST_UTIL.createMultiRegions(table, FAM_NAM);
TEST_UTIL.waitUntilAllRegionsAssigned(table.getName());
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
@ -809,8 +808,7 @@ public class TestHCM {
*/
@Test(timeout = 60000)
public void testCacheSeqNums() throws Exception{
HTable table = TEST_UTIL.createTable(TABLE_NAME2, FAM_NAM);
TEST_UTIL.createMultiRegions(table, FAM_NAM);
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME2, FAM_NAM);
Put put = new Put(ROW);
put.add(FAM_NAM, ROW, ROW);
table.put(put);
@ -1023,9 +1021,8 @@ public class TestHCM {
@Test (timeout=30000)
public void testMulti() throws Exception {
HTable table = TEST_UTIL.createTable(TABLE_NAME3, FAM_NAM);
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME3, FAM_NAM);
try {
TEST_UTIL.createMultiRegions(table, FAM_NAM);
ConnectionManager.HConnectionImplementation conn =
( ConnectionManager.HConnectionImplementation)table.getConnection();

View File

@ -32,13 +32,12 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -73,14 +72,11 @@ public class TestMetaScanner {
setUp();
final TableName TABLENAME = TableName.valueOf("testMetaScanner");
final byte[] FAMILY = Bytes.toBytes("family");
TEST_UTIL.createTable(TABLENAME, FAMILY);
Configuration conf = TEST_UTIL.getConfiguration();
final byte[][] SPLIT_KEYS =
new byte[][] { Bytes.toBytes("region_a"), Bytes.toBytes("region_b") };
TEST_UTIL.createTable(TABLENAME, FAMILY, SPLIT_KEYS);
HTable table = (HTable) connection.getTable(TABLENAME);
TEST_UTIL.createMultiRegions(conf, table, FAMILY,
new byte[][]{
HConstants.EMPTY_START_ROW,
Bytes.toBytes("region_a"),
Bytes.toBytes("region_b")});
// Make sure all the regions are deployed
TEST_UTIL.countRows(table);
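
Note on this conversion: the removed createMultiRegions call took region start keys (leading entry HConstants.EMPTY_START_ROW), while createTable takes split keys, so the leading empty key is simply dropped; either way the table here ends up with three regions:

// start keys {"", "region_a", "region_b"}  ==  split keys {"region_a", "region_b"}
// regions: ["", "region_a") ["region_a", "region_b") ["region_b", "")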

View File

@ -75,8 +75,7 @@ public class TestMultiParallel {
//((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.ALL);
//((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
UTIL.startMiniCluster(slaves);
HTable t = UTIL.createTable(TEST_TABLE, Bytes.toBytes(FAMILY));
UTIL.createMultiRegions(t, Bytes.toBytes(FAMILY));
HTable t = UTIL.createMultiRegionTable(TEST_TABLE, Bytes.toBytes(FAMILY));
UTIL.waitTableEnabled(TEST_TABLE);
t.close();
CONNECTION = ConnectionFactory.createConnection(UTIL.getConfiguration());

View File

@ -23,11 +23,13 @@ import static org.junit.Assert.assertEquals;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.filter.Filter;
@ -80,10 +82,9 @@ public class TestAggregateProtocol {
"org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
util.startMiniCluster(2);
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY,
new byte[][] { HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1],
ROWS[rowSeperator2] });
final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1],
ROWS[rowSeperator2] };
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
/**
* The testtable has one CQ which is always populated and one variable CQ
* for each row rowkey1: CF:CQ CF:CQ1 rowKey2: CF:CQ CF:CQ2

View File

@ -18,15 +18,19 @@
package org.apache.hadoop.hbase.coprocessor;
import static org.junit.Assert.assertEquals;
import java.math.BigDecimal;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.BigDecimalColumnInterpreter;
import org.apache.hadoop.hbase.filter.Filter;
@ -78,9 +82,8 @@ public class TestBigDecimalColumnInterpreter {
"org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
util.startMiniCluster(2);
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY, new byte[][] {
HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2] });
final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
/**
* The testtable has one CQ which is always populated and one variable CQ for each row rowkey1:
* CF:CQ CF:CQ1 rowKey2: CF:CQ CF:CQ2

View File

@ -80,9 +80,8 @@ public class TestDoubleColumnInterpreter {
"org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
util.startMiniCluster(2);
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY, new byte[][] {
HConstants.EMPTY_BYTE_ARRAY, ROWS[rowSeperator1], ROWS[rowSeperator2] });
final byte[][] SPLIT_KEYS = new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] };
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
/**
* The testtable has one CQ which is always populated and one variable CQ for each row rowkey1:
* CF:CQ CF:CQ1 rowKey2: CF:CQ CF:CQ2

View File

@ -1524,10 +1524,9 @@ public class TestMasterObserver {
cp.enableBypass(false);
cp.resetStates();
HTable table = UTIL.createTable(TEST_TABLE, TEST_FAMILY);
HTable table = UTIL.createMultiRegionTable(TEST_TABLE, TEST_FAMILY);
try {
UTIL.createMultiRegions(table, TEST_FAMILY);
UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
NavigableMap<HRegionInfo, ServerName> regions = table.getRegionLocations();

View File

@ -19,27 +19,31 @@
package org.apache.hadoop.hbase.coprocessor;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.junit.Assert;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.*;
/**
* Tests unhandled exceptions thrown by coprocessors running on a regionserver.
* Expected result is that the regionserver will abort with an informative
@ -93,8 +97,7 @@ public class TestRegionServerCoprocessorExceptionWithAbort {
// hosts the region we attempted to write to) to abort.
final byte[] TEST_FAMILY = Bytes.toBytes("aaa");
HTable table = TEST_UTIL.createTable(TABLE_NAME, TEST_FAMILY);
TEST_UTIL.createMultiRegions(table, TEST_FAMILY);
HTable table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, TEST_FAMILY);
TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
// Note which regionServer will abort (after put is attempted).

View File

@ -19,25 +19,28 @@
package org.apache.hadoop.hbase.coprocessor;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.*;
/**
* Tests unhandled exceptions thrown by coprocessors running on regionserver.
* Expected result is that the region server will remove the buggy coprocessor from
@ -91,12 +94,10 @@ public class TestRegionServerCoprocessorExceptionWithRemove {
// execute, which will set the rsZKNodeDeleted flag to true, which will
// pass this test.
TableName TEST_TABLE =
TableName.valueOf("observed_table");
TableName TEST_TABLE = TableName.valueOf("observed_table");
byte[] TEST_FAMILY = Bytes.toBytes("aaa");
HTable table = TEST_UTIL.createTable(TEST_TABLE, TEST_FAMILY);
TEST_UTIL.createMultiRegions(table, TEST_FAMILY);
HTable table = TEST_UTIL.createMultiRegionTable(TEST_TABLE, TEST_FAMILY);
TEST_UTIL.waitUntilAllRegionsAssigned(TEST_TABLE);
// Note which regionServer that should survive the buggy coprocessor's
// prePut().

View File

@ -24,7 +24,6 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.Arrays;
@ -54,11 +53,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HadoopShims;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HRegionLocator;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
@ -359,6 +356,16 @@ public class TestHFileOutputFormat {
return ret;
}
private byte[][] generateRandomSplitKeys(int numKeys) {
Random random = new Random();
byte[][] ret = new byte[numKeys][];
for (int i = 0; i < numKeys; i++) {
ret[i] =
PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH);
}
return ret;
}
@Test
public void testMRIncrementalLoad() throws Exception {
LOG.info("\nStarting test testMRIncrementalLoad\n");
@ -375,17 +382,19 @@ public class TestHFileOutputFormat {
boolean shouldChangeRegions) throws Exception {
util = new HBaseTestingUtility();
Configuration conf = util.getConfiguration();
byte[][] startKeys = generateRandomStartKeys(5);
byte[][] splitKeys = generateRandomSplitKeys(4);
HBaseAdmin admin = null;
try {
util.startMiniCluster();
Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
admin = util.getHBaseAdmin();
HTable table = util.createTable(TABLE_NAME, FAMILIES);
HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
assertEquals("Should start with empty table",
0, util.countRows(table));
int numRegions = util.createMultiRegions(
util.getConfiguration(), table, FAMILIES[0], startKeys);
int numRegions = -1;
try(RegionLocator r = table.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
assertEquals("Should make 5 regions", numRegions, 5);
// Generate the bulk load files
@ -416,10 +425,9 @@ public class TestHFileOutputFormat {
Threads.sleep(200);
LOG.info("Waiting on table to finish disabling");
}
byte[][] newStartKeys = generateRandomStartKeys(15);
util.createMultiRegions(
util.getConfiguration(), table, FAMILIES[0], newStartKeys);
admin.enableTable(table.getName());
util.deleteTable(table.getName());
byte[][] newSplitKeys = generateRandomSplitKeys(14);
table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
while (table.getRegionLocations().size() != 15 ||
!admin.isTableAvailable(table.getName())) {
Thread.sleep(200);
@ -1057,12 +1065,8 @@ public class TestHFileOutputFormat {
util = new HBaseTestingUtility(conf);
if ("newtable".equals(args[0])) {
TableName tname = TableName.valueOf(args[1]);
HTable table = util.createTable(tname, FAMILIES);
HBaseAdmin admin = new HBaseAdmin(conf);
admin.disableTable(tname);
byte[][] startKeys = generateRandomStartKeys(5);
util.createMultiRegions(conf, table, FAMILIES[0], startKeys);
admin.enableTable(tname);
byte[][] splitKeys = generateRandomSplitKeys(4);
HTable table = util.createTable(tname, FAMILIES, splitKeys);
} else if ("incremental".equals(args[0])) {
TableName tname = TableName.valueOf(args[1]);
HTable table = (HTable) util.getConnection().getTable(tname);
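
In TestHFileOutputFormat (and the analogous TestHFileOutputFormat2 change below), the old generateRandomStartKeys(n + 1)/createMultiRegions flow becomes a pre-split createTable with n random split keys; assuming the generated keys are distinct, n split keys produce n + 1 regions, so the existing "Should make 5 regions" check still passes:

// byte[][] splitKeys = generateRandomSplitKeys(4);         // 4 split points
// regions: ["", k1) [k1, k2) [k2, k3) [k3, k4) [k4, "")    // 5 regions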

View File

@ -25,6 +25,16 @@ import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -80,16 +90,6 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.Callable;
/**
* Simple test for {@link CellSortReducer} and {@link HFileOutputFormat2}.
* Sets up and runs a mapreduce job that writes hfile output.
@ -358,6 +358,16 @@ public class TestHFileOutputFormat2 {
return ret;
}
private byte[][] generateRandomSplitKeys(int numKeys) {
Random random = new Random();
byte[][] ret = new byte[numKeys][];
for (int i = 0; i < numKeys; i++) {
ret[i] =
PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH);
}
return ret;
}
@Test
public void testMRIncrementalLoad() throws Exception {
LOG.info("\nStarting test testMRIncrementalLoad\n");
@ -374,15 +384,18 @@ public class TestHFileOutputFormat2 {
boolean shouldChangeRegions) throws Exception {
util = new HBaseTestingUtility();
Configuration conf = util.getConfiguration();
byte[][] startKeys = generateRandomStartKeys(5);
byte[][] splitKeys = generateRandomSplitKeys(4);
util.startMiniCluster();
try (HTable table = util.createTable(TABLE_NAME, FAMILIES);
Admin admin = table.getConnection().getAdmin()) {
try {
HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
Admin admin = table.getConnection().getAdmin();
Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
assertEquals("Should start with empty table",
0, util.countRows(table));
int numRegions = util.createMultiRegions(
util.getConfiguration(), table, FAMILIES[0], startKeys);
int numRegions = -1;
try (RegionLocator r = table.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
assertEquals("Should make 5 regions", numRegions, 5);
// Generate the bulk load files
@ -413,10 +426,10 @@ public class TestHFileOutputFormat2 {
Threads.sleep(200);
LOG.info("Waiting on table to finish disabling");
}
byte[][] newStartKeys = generateRandomStartKeys(15);
util.createMultiRegions(
util.getConfiguration(), table, FAMILIES[0], newStartKeys);
admin.enableTable(table.getName());
util.deleteTable(table.getName());
byte[][] newSplitKeys = generateRandomSplitKeys(14);
table = util.createTable(TABLE_NAME, FAMILIES, newSplitKeys);
while (table.getRegionLocator().getAllRegionLocations().size() != 15 ||
!admin.isTableAvailable(table.getName())) {
Thread.sleep(200);
@ -1061,12 +1074,8 @@ public class TestHFileOutputFormat2 {
util = new HBaseTestingUtility(conf);
if ("newtable".equals(args[0])) {
TableName tname = TableName.valueOf(args[1]);
try (HTable table = util.createTable(tname, FAMILIES);
Admin admin = table.getConnection().getAdmin()) {
admin.disableTable(tname);
byte[][] startKeys = generateRandomStartKeys(5);
util.createMultiRegions(conf, table, FAMILIES[0], startKeys);
admin.enableTable(tname);
byte[][] splitKeys = generateRandomSplitKeys(4);
try (HTable table = util.createTable(tname, FAMILIES, splitKeys)) {
}
} else if ("incremental".equals(args[0])) {
TableName tname = TableName.valueOf(args[1]);

View File

@ -77,8 +77,8 @@ public class TestMultiTableInputFormat {
// create and fill table
for (int i = 0; i < 3; i++) {
try (HTable table =
TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)), INPUT_FAMILY)) {
TEST_UTIL.createMultiRegions(TEST_UTIL.getConfiguration(), table, INPUT_FAMILY, 4);
TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)),
INPUT_FAMILY, 4)) {
TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
}
}

View File

@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hbase.mapreduce;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
@ -25,10 +28,13 @@ import java.util.NavigableMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
@ -46,9 +52,6 @@ import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.fail;
import static org.junit.Assert.assertTrue;
/**
* Test Map/Reduce job over HBase tables. The map/reduce process we're testing
* on our tables is simple - take every row in the table, reverse the value of
@ -67,8 +70,9 @@ public class TestMultithreadedTableMapper {
@BeforeClass
public static void beforeClass() throws Exception {
UTIL.startMiniCluster();
HTable table = UTIL.createTable(MULTI_REGION_TABLE_NAME, new byte[][] {INPUT_FAMILY, OUTPUT_FAMILY});
UTIL.createMultiRegions(table, INPUT_FAMILY);
HTable table =
UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
OUTPUT_FAMILY });
UTIL.loadTable(table, INPUT_FAMILY, false);
UTIL.startMiniMapReduceCluster();
UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME);

View File

@ -111,7 +111,7 @@ public class TestTableInputFormatScan1 extends TestTableInputFormatScanBase {
*/
@Test
public void testGetSplits() throws IOException, InterruptedException, ClassNotFoundException {
testNumOfSplits("-1", 50);
testNumOfSplits("-1", 52);
testNumOfSplits("100", 1);
}

View File

@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
@ -83,8 +82,7 @@ public abstract class TestTableInputFormatScanBase {
// start mini hbase cluster
TEST_UTIL.startMiniCluster(3);
// create and fill table
table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME), INPUT_FAMILY);
TEST_UTIL.createMultiRegions(table, INPUT_FAMILY);
table = TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME), INPUT_FAMILY);
TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
// start MR cluster
TEST_UTIL.startMiniMapReduceCluster();

View File

@ -77,8 +77,8 @@ public abstract class TestTableMapReduceBase {
public static void beforeClass() throws Exception {
UTIL.startMiniCluster();
HTable table =
UTIL.createTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY });
UTIL.createMultiRegions(table, INPUT_FAMILY);
UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY,
OUTPUT_FAMILY });
UTIL.loadTable(table, INPUT_FAMILY, false);
UTIL.startMiniMapReduceCluster();
}

View File

@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.NonceGenerator;
import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
@ -91,11 +92,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -104,6 +100,10 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@ -1433,8 +1433,11 @@ public class TestDistributedLogSplitting {
TableName table = TableName.valueOf(tname);
byte [] family = Bytes.toBytes(fname);
LOG.info("Creating table with " + nrs + " regions");
HTable ht = TEST_UTIL.createTable(table, family);
int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family, nrs);
HTable ht = TEST_UTIL.createMultiRegionTable(table, family, nrs);
int numRegions = -1;
try (RegionLocator r = ht.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
assertEquals(nrs, numRegions);
LOG.info("Waiting for no more RIT\n");
blockUntilNoRIT(zkw, master);

View File

@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -67,9 +67,11 @@ public class TestMasterRestartAfterDisablingTable {
TableName table = TableName.valueOf("tableRestart");
byte[] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
HTable ht = TEST_UTIL.createTable(table, family);
int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family,
NUM_REGIONS_TO_CREATE);
HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
int numRegions = -1;
try (RegionLocator r = ht.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
numRegions += 1; // catalogs
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);

View File

@ -22,16 +22,17 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -64,9 +65,12 @@ public class TestMasterTransitions {
TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
TEST_UTIL.startMiniCluster(2);
// Create a table of three families. This will assign a region.
TEST_UTIL.createTable(TABLENAME, FAMILIES);
TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
HTable t = (HTable) TEST_UTIL.getConnection().getTable(TABLENAME);
int countOfRegions = TEST_UTIL.createMultiRegions(t, getTestFamily());
int countOfRegions = -1;
try (RegionLocator r = t.getRegionLocator()) {
countOfRegions = r.getStartKeys().length;
}
TEST_UTIL.waitUntilAllRegionsAssigned(TABLENAME);
addToEachStartKey(countOfRegions);
t.close();

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -75,9 +76,11 @@ public class TestRollingRestart {
TableName table = TableName.valueOf("tableRestart");
byte [] family = Bytes.toBytes("family");
log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
HTable ht = TEST_UTIL.createTable(table, family);
int numRegions = TEST_UTIL.createMultiRegions(conf, ht, family,
NUM_REGIONS_TO_CREATE);
HTable ht = TEST_UTIL.createMultiRegionTable(table, family, NUM_REGIONS_TO_CREATE);
int numRegions = -1;
try (RegionLocator r = ht.getRegionLocator()) {
numRegions = r.getStartKeys().length;
}
numRegions += 1; // catalogs
log("Waiting for no more RIT\n");
TEST_UTIL.waitUntilNoRegionsInTransition(60000);

View File

@ -31,8 +31,8 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@ -72,8 +72,7 @@ public class TestRegionFavoredNodes {
return;
}
TEST_UTIL.startMiniCluster(REGION_SERVERS);
table = TEST_UTIL.createTable(TABLE_NAME, COLUMN_FAMILY);
TEST_UTIL.createMultiRegions(table, COLUMN_FAMILY);
table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, COLUMN_FAMILY);
TEST_UTIL.waitUntilAllRegionsAssigned(TABLE_NAME);
}

View File

@ -31,13 +31,10 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
@ -59,6 +56,8 @@ import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.NoopRes
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingRequest;
import org.apache.hadoop.hbase.coprocessor.protobuf.generated.PingProtos.PingResponse;
import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
@ -156,9 +155,8 @@ public class TestServerCustomProtocol {
@Before
public void before() throws Exception {
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY,
new byte[][]{ HConstants.EMPTY_BYTE_ARRAY, ROW_B, ROW_C});
final byte[][] SPLIT_KEYS = new byte[][] { ROW_B, ROW_C };
HTable table = util.createTable(TEST_TABLE, TEST_FAMILY, SPLIT_KEYS);
Put puta = new Put( ROW_A );
puta.add(TEST_FAMILY, Bytes.toBytes("col1"), Bytes.toBytes(1));