HBASE-22548 Split TestAdmin1

zhangduo 2019-06-08 13:51:28 +08:00 committed by Apache9
parent 1b3914067e
commit 80281127cb
5 changed files with 1162 additions and 1150 deletions


@@ -0,0 +1,550 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.apache.hadoop.hbase.HBaseTestingUtility.countRows;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
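/**
 * Admin API tests covering table creation, truncation, schema cloning and online schema changes.
 * Split out of the former TestAdmin1 (HBASE-22548); shared cluster setup lives in TestAdminBase.
 */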
@Category({ LargeTests.class, ClientTests.class })
public class TestAdmin extends TestAdminBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin.class);
private static final Logger LOG = LoggerFactory.getLogger(TestAdmin.class);
@Test
public void testCreateTable() throws IOException {
List<TableDescriptor> tables = ADMIN.listTableDescriptors();
int numTables = tables.size();
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
tables = ADMIN.listTableDescriptors();
assertEquals(numTables + 1, tables.size());
assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
.getTableStateManager().isTableState(tableName, TableState.State.ENABLED));
assertEquals(TableState.State.ENABLED, getStateFromMeta(tableName));
}
@Test
public void testTruncateTable() throws IOException {
testTruncateTable(TableName.valueOf(name.getMethodName()), false);
}
@Test
public void testTruncateTablePreservingSplits() throws IOException {
testTruncateTable(TableName.valueOf(name.getMethodName()), true);
}
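// Shared helper: creates a table pre-split into three regions, loads ten rows, truncates it,
// and then verifies that the table is empty and that the original split points survive only
// when preserveSplits is set.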
private void testTruncateTable(final TableName tableName, boolean preserveSplits)
throws IOException {
byte[][] splitKeys = new byte[2][];
splitKeys[0] = Bytes.toBytes(4);
splitKeys[1] = Bytes.toBytes(8);
// Create & Fill the table
Table table = TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY, splitKeys);
try {
TEST_UTIL.loadNumericRows(table, HConstants.CATALOG_FAMILY, 0, 10);
assertEquals(10, countRows(table));
} finally {
table.close();
}
assertEquals(3, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
// Truncate & Verify
ADMIN.disableTable(tableName);
ADMIN.truncateTable(tableName, preserveSplits);
table = TEST_UTIL.getConnection().getTable(tableName);
try {
assertEquals(0, countRows(table));
} finally {
table.close();
}
if (preserveSplits) {
assertEquals(3, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
} else {
assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
}
}
@Test
public void testCreateTableNumberOfRegions() throws IOException, InterruptedException {
TableName table = TableName.valueOf(name.getMethodName());
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY);
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table).setColumnFamily(cfd).build());
List<HRegionLocation> regions;
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table)) {
regions = l.getAllRegionLocations();
assertEquals("Table should have only 1 region", 1, regions.size());
}
TableName table2 = TableName.valueOf(table.getNameAsString() + "_2");
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table2).setColumnFamily(cfd).build(),
new byte[][] { new byte[] { 42 } });
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table2)) {
regions = l.getAllRegionLocations();
assertEquals("Table should have only 2 region", 2, regions.size());
}
TableName table3 = TableName.valueOf(table.getNameAsString() + "_3");
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table3).setColumnFamily(cfd).build(),
Bytes.toBytes("a"), Bytes.toBytes("z"), 3);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table3)) {
regions = l.getAllRegionLocations();
assertEquals("Table should have only 3 region", 3, regions.size());
}
TableName table4 = TableName.valueOf(table.getNameAsString() + "_4");
try {
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table4).setColumnFamily(cfd).build(),
Bytes.toBytes("a"), Bytes.toBytes("z"), 2);
fail("Should not be able to create a table with only 2 regions using this API.");
} catch (IllegalArgumentException eae) {
// Expected
}
TableName table5 = TableName.valueOf(table.getNameAsString() + "_5");
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table5).setColumnFamily(cfd).build(),
new byte[] { 1 }, new byte[] { 127 }, 16);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table5)) {
regions = l.getAllRegionLocations();
assertEquals("Table should have 16 region", 16, regions.size());
}
}
@Test
public void testCreateTableWithRegions() throws IOException, InterruptedException {
TableName table = TableName.valueOf(name.getMethodName());
ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY);
byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 },
new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 }, };
int expectedRegions = splitKeys.length + 1;
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table).setColumnFamily(cfd).build(),
splitKeys);
boolean tableAvailable = ADMIN.isTableAvailable(table);
assertTrue("Table should be created with splitKyes + 1 rows in META", tableAvailable);
List<HRegionLocation> regions;
Iterator<HRegionLocation> hris;
RegionInfo hri;
ClusterConnection conn = (ClusterConnection) TEST_UTIL.getConnection();
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table)) {
regions = l.getAllRegionLocations();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
expectedRegions, regions.size());
System.err.println("Found " + regions.size() + " regions");
hris = regions.iterator();
hri = hris.next().getRegion();
assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[0]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[0]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[1]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[1]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[2]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[2]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[3]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[3]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[4]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[4]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[5]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[5]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[6]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[6]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[7]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[7]));
assertTrue(Bytes.equals(hri.getEndKey(), splitKeys[8]));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), splitKeys[8]));
assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
verifyRoundRobinDistribution(conn, l, expectedRegions);
}
// Now test using start/end with a number of regions
// Use 80 bit numbers to make sure we aren't limited
byte[] startKey = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
byte[] endKey = { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 };
// Splitting into 10 regions, we expect (null,1) ... (9, null)
// with (1,2) (2,3) (3,4) (4,5) (5,6) (6,7) (7,8) (8,9) in the middle
expectedRegions = 10;
TableName table2 = TableName.valueOf(table.getNameAsString() + "_2");
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table2).setColumnFamily(cfd).build(),
startKey, endKey, expectedRegions);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table2)) {
regions = l.getAllRegionLocations();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
expectedRegions, regions.size());
System.err.println("Found " + regions.size() + " regions");
hris = regions.iterator();
hri = hris.next().getRegion();
assertTrue(hri.getStartKey() == null || hri.getStartKey().length == 0);
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 8, 8, 8, 8, 8, 8, 8, 8, 8, 8 }));
assertTrue(Bytes.equals(hri.getEndKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
hri = hris.next().getRegion();
assertTrue(Bytes.equals(hri.getStartKey(), new byte[] { 9, 9, 9, 9, 9, 9, 9, 9, 9, 9 }));
assertTrue(hri.getEndKey() == null || hri.getEndKey().length == 0);
verifyRoundRobinDistribution(conn, l, expectedRegions);
}
// Try once more with a start/end key range that does not divide evenly into the region count
startKey = new byte[] { 0, 0, 0, 0, 0, 0 };
endKey = new byte[] { 1, 0, 0, 0, 0, 0 };
expectedRegions = 5;
TableName table3 = TableName.valueOf(table.getNameAsString() + "_3");
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table3).setColumnFamily(cfd).build(),
startKey, endKey, expectedRegions);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(table3)) {
regions = l.getAllRegionLocations();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
expectedRegions, regions.size());
System.err.println("Found " + regions.size() + " regions");
verifyRoundRobinDistribution(conn, l, expectedRegions);
}
// Try an invalid case where there are duplicate split keys
splitKeys = new byte[][] { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 },
new byte[] { 3, 3, 3 }, new byte[] { 2, 2, 2 } };
TableName table4 = TableName.valueOf(table.getNameAsString() + "_4");
try {
ADMIN.createTable(TableDescriptorBuilder.newBuilder(table4).setColumnFamily(cfd).build(),
splitKeys);
assertTrue("Should not be able to create this table because of " + "duplicate split keys",
false);
} catch (IllegalArgumentException iae) {
// Expected
}
}
@Test
public void testCreateTableWithOnlyEmptyStartRow() throws IOException {
final byte[] tableName = Bytes.toBytes(name.getMethodName());
byte[][] splitKeys = new byte[1][];
splitKeys[0] = HConstants.EMPTY_BYTE_ARRAY;
TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build();
try {
ADMIN.createTable(desc, splitKeys);
fail("Test case should fail as empty split key is passed.");
} catch (IllegalArgumentException e) {
}
}
@Test
public void testCreateTableWithEmptyRowInTheSplitKeys() throws IOException {
final byte[] tableName = Bytes.toBytes(name.getMethodName());
byte[][] splitKeys = new byte[3][];
splitKeys[0] = Bytes.toBytes("region1");
splitKeys[1] = HConstants.EMPTY_BYTE_ARRAY;
splitKeys[2] = Bytes.toBytes("region2");
TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("col")).build();
try {
ADMIN.createTable(desc, splitKeys);
fail("Test case should fail as empty split key is passed.");
} catch (IllegalArgumentException e) {
LOG.info("Expected ", e);
}
}
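/**
 * Asserts that the regions of a newly created table are spread evenly over the cluster: every
 * region server must host either floor(expectedRegions / numRS) or ceil(expectedRegions / numRS)
 * regions. When user regions are allowed on the master it is excluded from the count, since it
 * intentionally hosts fewer regions.
 */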
private void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator,
int expectedRegions) throws IOException {
int numRS = c.getCurrentNrHRS();
List<HRegionLocation> regions = regionLocator.getAllRegionLocations();
Map<ServerName, List<RegionInfo>> server2Regions = new HashMap<>();
for (HRegionLocation loc : regions) {
ServerName server = loc.getServerName();
List<RegionInfo> regs = server2Regions.get(server);
if (regs == null) {
regs = new ArrayList<>();
server2Regions.put(server, regs);
}
regs.add(loc.getRegion());
}
boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
if (tablesOnMaster) {
// Ignore the master region server,
// which intentionally hosts fewer regions.
numRS--;
}
float average = (float) expectedRegions / numRS;
int min = (int) Math.floor(average);
int max = (int) Math.ceil(average);
for (List<RegionInfo> regionList : server2Regions.values()) {
assertTrue(
"numRS=" + numRS + ", min=" + min + ", max=" + max + ", size=" + regionList.size() +
", tablesOnMaster=" + tablesOnMaster,
regionList.size() == min || regionList.size() == max);
}
}
@Test
public void testCloneTableSchema() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
testCloneTableSchema(tableName, newTableName, false);
}
@Test
public void testCloneTableSchemaPreservingSplits() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
testCloneTableSchema(tableName, newTableName, true);
}
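// Shared helper: creates a pre-split source table with two column families (one using custom
// block size, TTL and block-cache settings), clones its schema to newTableName, and verifies
// the copied descriptor as well as the region count of the new table.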
private void testCloneTableSchema(final TableName tableName, final TableName newTableName,
boolean preserveSplits) throws Exception {
byte[] FAMILY_0 = Bytes.toBytes("cf0");
byte[] FAMILY_1 = Bytes.toBytes("cf1");
byte[][] splitKeys = new byte[2][];
splitKeys[0] = Bytes.toBytes(4);
splitKeys[1] = Bytes.toBytes(8);
int NUM_FAMILIES = 2;
int NUM_REGIONS = 3;
int BLOCK_SIZE = 1024;
int TTL = 86400;
boolean BLOCK_CACHE = false;
// Create the table
TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_0))
.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY_1).setBlocksize(BLOCK_SIZE)
.setBlockCacheEnabled(BLOCK_CACHE).setTimeToLive(TTL).build())
.build();
ADMIN.createTable(tableDesc, splitKeys);
assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(tableName).size());
assertTrue("Table should be created with splitKyes + 1 rows in META",
ADMIN.isTableAvailable(tableName));
// clone & Verify
ADMIN.cloneTableSchema(tableName, newTableName, preserveSplits);
TableDescriptor newTableDesc = ADMIN.getDescriptor(newTableName);
assertEquals(NUM_FAMILIES, newTableDesc.getColumnFamilyCount());
assertEquals(BLOCK_SIZE, newTableDesc.getColumnFamily(FAMILY_1).getBlocksize());
assertEquals(BLOCK_CACHE, newTableDesc.getColumnFamily(FAMILY_1).isBlockCacheEnabled());
assertEquals(TTL, newTableDesc.getColumnFamily(FAMILY_1).getTimeToLive());
TEST_UTIL.verifyTableDescriptorIgnoreTableName(tableDesc, newTableDesc);
if (preserveSplits) {
assertEquals(NUM_REGIONS, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
assertTrue("New table should be created with splitKyes + 1 rows in META",
ADMIN.isTableAvailable(newTableName));
} else {
assertEquals(1, TEST_UTIL.getHBaseCluster().getRegions(newTableName).size());
}
}
@Test
public void testCloneTableSchemaWithNonExistentSourceTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
// test for non-existent source table
try {
ADMIN.cloneTableSchema(tableName, newTableName, false);
fail("Should have failed to create a new table by cloning non-existent source table.");
} catch (TableNotFoundException ex) {
// expected
}
}
@Test
public void testCloneTableSchemaWithExistentDestinationTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final TableName newTableName = TableName.valueOf(tableName.getNameAsString() + "_new");
byte[] FAMILY_0 = Bytes.toBytes("cf0");
TEST_UTIL.createTable(tableName, FAMILY_0);
TEST_UTIL.createTable(newTableName, FAMILY_0);
// test for existent destination table
try {
ADMIN.cloneTableSchema(tableName, newTableName, false);
fail("Should have failed to create a existent table.");
} catch (TableExistsException ex) {
// expected
}
}
@Test
public void testModifyTableOnTableWithRegionReplicas() throws Exception {
TableName tableName = TableName.valueOf(name.getMethodName());
TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf")))
.setRegionReplication(5).build();
ADMIN.createTable(desc);
int maxFileSize = 10000000;
TableDescriptor newDesc =
TableDescriptorBuilder.newBuilder(desc).setMaxFileSize(maxFileSize).build();
ADMIN.modifyTable(newDesc);
TableDescriptor newTableDesc = ADMIN.getDescriptor(tableName);
assertEquals(maxFileSize, newTableDesc.getMaxFileSize());
}
/**
* Verify schema modification takes.
*/
@Test
public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
final TableName tableName = TableName.valueOf(name.getMethodName());
List<TableDescriptor> tables = ADMIN.listTableDescriptors();
int numTables = tables.size();
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
tables = ADMIN.listTableDescriptors();
assertEquals(numTables + 1, tables.size());
// First, make table descriptor changes.
TableDescriptor htd = ADMIN.getDescriptor(tableName);
// Make a copy and assert copy is good.
TableDescriptor copy = TableDescriptorBuilder.newBuilder(htd).build();
assertEquals(htd, copy);
String key = "anyoldkey";
assertNull(htd.getValue(key));
// Now amend the copy. Introduce differences.
long newFlushSize = htd.getMemStoreFlushSize() / 2;
if (newFlushSize <= 0) {
newFlushSize = TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
}
copy = TableDescriptorBuilder.newBuilder(copy).setMemStoreFlushSize(newFlushSize)
.setValue(key, key).build();
ADMIN.modifyTable(copy);
TableDescriptor modifiedHtd = ADMIN.getDescriptor(tableName);
assertNotEquals(htd, modifiedHtd);
assertEquals(copy, modifiedHtd);
assertEquals(newFlushSize, modifiedHtd.getMemStoreFlushSize());
assertEquals(key, modifiedHtd.getValue(key));
// Now work on column family changes.
int countOfFamilies = modifiedHtd.getColumnFamilyCount();
assertTrue(countOfFamilies > 0);
ColumnFamilyDescriptor hcd = modifiedHtd.getColumnFamilies()[0];
int maxversions = hcd.getMaxVersions();
int newMaxVersions = maxversions + 1;
hcd = ColumnFamilyDescriptorBuilder.newBuilder(hcd).setMaxVersions(newMaxVersions).build();
byte[] hcdName = hcd.getName();
ADMIN.modifyColumnFamily(tableName, hcd);
modifiedHtd = ADMIN.getDescriptor(tableName);
ColumnFamilyDescriptor modifiedHcd = modifiedHtd.getColumnFamily(hcdName);
assertEquals(newMaxVersions, modifiedHcd.getMaxVersions());
// Try adding a column family
assertFalse(ADMIN.isTableDisabled(tableName));
String xtracolName = "xtracol";
ColumnFamilyDescriptor xtracol = ColumnFamilyDescriptorBuilder
.newBuilder(Bytes.toBytes(xtracolName)).setValue(xtracolName, xtracolName).build();
ADMIN.addColumnFamily(tableName, xtracol);
modifiedHtd = ADMIN.getDescriptor(tableName);
hcd = modifiedHtd.getColumnFamily(xtracol.getName());
assertNotNull(hcd);
assertEquals(xtracolName, Bytes.toString(hcd.getValue(Bytes.toBytes(xtracolName))));
// Delete the just-added column family.
ADMIN.deleteColumnFamily(tableName, xtracol.getName());
modifiedHtd = ADMIN.getDescriptor(tableName);
hcd = modifiedHtd.getColumnFamily(xtracol.getName());
assertNull(hcd);
// Delete the table
ADMIN.disableTable(tableName);
ADMIN.deleteTable(tableName);
ADMIN.listTableDescriptors();
assertFalse(ADMIN.tableExists(tableName));
}
}


@@ -33,7 +33,6 @@ import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
@@ -60,15 +59,10 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -80,42 +74,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
* Add any testing of HBaseAdmin functionality here.
*/
@Category({LargeTests.class, ClientTests.class})
public class TestAdmin2 {
public class TestAdmin2 extends TestAdminBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestAdmin2.class);
private static final Logger LOG = LoggerFactory.getLogger(TestAdmin2.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Admin ADMIN;
@Rule
public TestName name = new TestName();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 30);
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 30);
TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
TEST_UTIL.startMiniCluster(3);
ADMIN = TEST_UTIL.getAdmin();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@After
public void tearDown() throws Exception {
for (TableDescriptor htd : ADMIN.listTableDescriptors()) {
TEST_UTIL.deleteTable(htd.getTableName());
}
}
@Test
public void testCreateBadTables() throws IOException {
@@ -409,7 +374,6 @@ public class TestAdmin2 {
/**
* For HBASE-2556
* @throws IOException
*/
@Test
public void testGetTableRegions() throws IOException {


@@ -0,0 +1,409 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.List;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
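/**
 * Admin API tests covering enable/disable, column-family add/modify/delete, and table descriptor
 * retrieval and modification, split out of the former TestAdmin1 (HBASE-22548).
 */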
@Category({ LargeTests.class, ClientTests.class })
public class TestAdmin3 extends TestAdminBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAdmin3.class);
private static final Logger LOG = LoggerFactory.getLogger(TestAdmin3.class);
@Test
public void testDisableAndEnableTable() throws IOException {
final byte[] row = Bytes.toBytes("row");
final byte[] qualifier = Bytes.toBytes("qualifier");
final byte[] value = Bytes.toBytes("value");
final TableName table = TableName.valueOf(name.getMethodName());
Table ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
Put put = new Put(row);
put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
ht.put(put);
Get get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
ht.get(get);
ADMIN.disableTable(ht.getName());
assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster().getMaster()
.getTableStateManager().isTableState(ht.getName(), TableState.State.DISABLED));
assertEquals(TableState.State.DISABLED, getStateFromMeta(table));
// Test that table is disabled
get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
boolean ok = false;
try {
ht.get(get);
} catch (TableNotEnabledException e) {
ok = true;
}
ok = false;
// verify that scan encounters correct exception
Scan scan = new Scan();
try {
ResultScanner scanner = ht.getScanner(scan);
Result res = null;
do {
res = scanner.next();
} while (res != null);
} catch (TableNotEnabledException e) {
ok = true;
}
assertTrue(ok);
ADMIN.enableTable(table);
assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster().getMaster()
.getTableStateManager().isTableState(ht.getName(), TableState.State.ENABLED));
assertEquals(TableState.State.ENABLED, getStateFromMeta(table));
// Test that table is enabled
try {
ht.get(get);
} catch (RetriesExhaustedException e) {
ok = false;
}
assertTrue(ok);
ht.close();
}
@Test
public void testDisableAndEnableTables() throws IOException {
final byte[] row = Bytes.toBytes("row");
final byte[] qualifier = Bytes.toBytes("qualifier");
final byte[] value = Bytes.toBytes("value");
final TableName table1 = TableName.valueOf(name.getMethodName() + "1");
final TableName table2 = TableName.valueOf(name.getMethodName() + "2");
Table ht1 = TEST_UTIL.createTable(table1, HConstants.CATALOG_FAMILY);
Table ht2 = TEST_UTIL.createTable(table2, HConstants.CATALOG_FAMILY);
Put put = new Put(row);
put.addColumn(HConstants.CATALOG_FAMILY, qualifier, value);
ht1.put(put);
ht2.put(put);
Get get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
ht1.get(get);
ht2.get(get);
TableName[] tableNames = ADMIN.listTableNames(Pattern.compile("testDisableAndEnableTable.*"));
for (TableName tableName : tableNames) {
ADMIN.disableTable(tableName);
}
// Test that tables are disabled
get = new Get(row);
get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
boolean ok = false;
try {
ht1.get(get);
ht2.get(get);
} catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
ok = true;
}
assertEquals(TableState.State.DISABLED, getStateFromMeta(table1));
assertEquals(TableState.State.DISABLED, getStateFromMeta(table2));
assertTrue(ok);
for (TableName tableName : tableNames) {
ADMIN.enableTable(tableName);
}
// Test that tables are enabled
try {
ht1.get(get);
} catch (IOException e) {
ok = false;
}
try {
ht2.get(get);
} catch (IOException e) {
ok = false;
}
assertTrue(ok);
ht1.close();
ht2.close();
assertEquals(TableState.State.ENABLED, getStateFromMeta(table1));
assertEquals(TableState.State.ENABLED, getStateFromMeta(table2));
}
/**
* Test retain assignment on enableTable.
*/
@Test
public void testEnableTableRetainAssignment() throws IOException {
final TableName tableName = TableName.valueOf(name.getMethodName());
byte[][] splitKeys = { new byte[] { 1, 1, 1 }, new byte[] { 2, 2, 2 }, new byte[] { 3, 3, 3 },
new byte[] { 4, 4, 4 }, new byte[] { 5, 5, 5 }, new byte[] { 6, 6, 6 },
new byte[] { 7, 7, 7 }, new byte[] { 8, 8, 8 }, new byte[] { 9, 9, 9 } };
int expectedRegions = splitKeys.length + 1;
TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
ADMIN.createTable(desc, splitKeys);
try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
List<HRegionLocation> regions = l.getAllRegionLocations();
assertEquals(
"Tried to create " + expectedRegions + " regions " + "but only found " + regions.size(),
expectedRegions, regions.size());
// Disable table.
ADMIN.disableTable(tableName);
// Enable table, use retain assignment to assign regions.
ADMIN.enableTable(tableName);
List<HRegionLocation> regions2 = l.getAllRegionLocations();
// Check the assignment.
assertEquals(regions.size(), regions2.size());
assertTrue(regions2.containsAll(regions));
}
}
@Test
public void testEnableDisableAddColumnDeleteColumn() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
while (!ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName()))) {
Thread.sleep(10);
}
ADMIN.disableTable(tableName);
try {
TEST_UTIL.getConnection().getTable(tableName);
} catch (org.apache.hadoop.hbase.DoNotRetryIOException e) {
// expected
}
ADMIN.addColumnFamily(tableName, ColumnFamilyDescriptorBuilder.of("col2"));
ADMIN.enableTable(tableName);
try {
ADMIN.deleteColumnFamily(tableName, Bytes.toBytes("col2"));
} catch (TableNotDisabledException e) {
LOG.info(e.toString(), e);
}
ADMIN.disableTable(tableName);
ADMIN.deleteTable(tableName);
}
@Test
public void testGetTableDescriptor() throws IOException {
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam1"))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam2"))
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("fam3")).build();
ADMIN.createTable(htd);
Table table = TEST_UTIL.getConnection().getTable(htd.getTableName());
TableDescriptor confirmedHtd = table.getDescriptor();
assertEquals(0, TableDescriptor.COMPARATOR.compare(htd, confirmedHtd));
MetaTableAccessor.fullScanMetaAndPrint(TEST_UTIL.getConnection());
table.close();
}
/**
* Verify schema change for read only table
*/
@Test
public void testReadOnlyTableModify() throws IOException, InterruptedException {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
// Make table read only
TableDescriptor htd =
TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName)).setReadOnly(true).build();
ADMIN.modifyTable(htd);
// try to modify the read only table now
htd = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName))
.setCompactionEnabled(false).build();
ADMIN.modifyTable(htd);
// Delete the table
ADMIN.disableTable(tableName);
ADMIN.deleteTable(tableName);
assertFalse(ADMIN.tableExists(tableName));
}
@Test(expected = TableNotDisabledException.class)
public void testModifyRegionReplicasEnabledTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
// Modify region replication count
TableDescriptor htd = TableDescriptorBuilder.newBuilder(ADMIN.getDescriptor(tableName))
.setRegionReplication(3).build();
try {
// try to modify the region replication count without disabling the table
ADMIN.modifyTable(htd);
fail("Expected an exception");
} finally {
// Delete the table
ADMIN.disableTable(tableName);
ADMIN.deleteTable(tableName);
assertFalse(ADMIN.tableExists(tableName));
}
}
@Test
public void testDeleteLastColumnFamily() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
while (!ADMIN.isTableEnabled(TableName.valueOf(name.getMethodName()))) {
Thread.sleep(10);
}
// test for enabled table
try {
ADMIN.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
fail("Should have failed to delete the only column family of a table");
} catch (InvalidFamilyOperationException ex) {
// expected
}
// test for disabled table
ADMIN.disableTable(tableName);
try {
ADMIN.deleteColumnFamily(tableName, HConstants.CATALOG_FAMILY);
fail("Should have failed to delete the only column family of a table");
} catch (InvalidFamilyOperationException ex) {
// expected
}
ADMIN.deleteTable(tableName);
}
@Test
public void testDeleteEditUnknownColumnFamilyAndOrTable() throws IOException {
// Test that we get an exception if we try to edit a nonexistent table or column family.
final TableName nonexistentTable = TableName.valueOf("nonexistent");
final byte[] nonexistentColumn = Bytes.toBytes("nonexistent");
ColumnFamilyDescriptor nonexistentHcd = ColumnFamilyDescriptorBuilder.of(nonexistentColumn);
Exception exception = null;
try {
ADMIN.addColumnFamily(nonexistentTable, nonexistentHcd);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
ADMIN.deleteTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
ADMIN.deleteColumnFamily(nonexistentTable, nonexistentColumn);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
ADMIN.disableTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
ADMIN.enableTable(nonexistentTable);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
ADMIN.modifyColumnFamily(nonexistentTable, nonexistentHcd);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
exception = null;
try {
TableDescriptor htd = TableDescriptorBuilder.newBuilder(nonexistentTable)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(HConstants.CATALOG_FAMILY)).build();
ADMIN.modifyTable(htd);
} catch (IOException e) {
exception = e;
}
assertTrue(exception instanceof TableNotFoundException);
// Now make it so at least the table exists and then do tests against a
// nonexistent column family -- see if we get right exceptions.
final TableName tableName =
TableName.valueOf(name.getMethodName() + System.currentTimeMillis());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
ADMIN.createTable(htd);
try {
exception = null;
try {
ADMIN.deleteColumnFamily(htd.getTableName(), nonexistentHcd.getName());
} catch (IOException e) {
exception = e;
}
assertTrue("found=" + exception.getClass().getName(),
exception instanceof InvalidFamilyOperationException);
exception = null;
try {
ADMIN.modifyColumnFamily(htd.getTableName(), nonexistentHcd);
} catch (IOException e) {
exception = e;
}
assertTrue("found=" + exception.getClass().getName(),
exception instanceof InvalidFamilyOperationException);
} finally {
ADMIN.disableTable(tableName);
ADMIN.deleteTable(tableName);
}
}
}


@@ -0,0 +1,70 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertNotNull;
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TestName;
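/**
 * Base class for the split TestAdmin* tests: starts a three-node mini cluster once per test
 * class, exposes the shared Admin handle, and deletes every user table after each test method.
 */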
public class TestAdminBase {
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected static Admin ADMIN;
@Rule
public TestName name = new TestName();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 30);
TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 30);
TEST_UTIL.startMiniCluster(3);
ADMIN = TEST_UTIL.getAdmin();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
@After
public void tearDown() throws Exception {
for (TableDescriptor htd : ADMIN.listTableDescriptors()) {
TEST_UTIL.deleteTable(htd.getTableName());
}
}
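/**
 * Reads the state recorded for the given table in hbase:meta, asserting that a state entry
 * exists before returning it.
 */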
protected TableState.State getStateFromMeta(TableName table) throws IOException {
TableState state = MetaTableAccessor.getTableState(TEST_UTIL.getConnection(), table);
assertNotNull(state);
return state.getState();
}
}