HBASE-13322 Replace explicit HBaseAdmin creation with connection#getAdmin()

Andrey Stepachev 2015-04-27 16:28:53 +01:00
parent f2e1238f98
commit d5ff2b587c
13 changed files with 131 additions and 153 deletions
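The pattern applied across all thirteen files is the same: stop constructing HBaseAdmin directly (its constructors are deprecated in the 1.0 client API) and obtain the Admin from an explicitly managed Connection instead. A minimal before/after sketch of that pattern, assuming only the HBase client classes touched by this commit (the table name is illustrative, not taken from the diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminMigrationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Before: new HBaseAdmin(conf) built and owned its own connection,
    // and every call site had to remember to call admin.close().
    // After: the Admin borrows a caller-managed Connection; try-with-resources
    // closes the Admin first, then the Connection, even on exceptions.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      admin.tableExists(TableName.valueOf("example"));  // "example" is illustrative
    }
  }
}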

View File

@@ -528,7 +528,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
       throw new IllegalStateException("No live regionservers");
     }
     int regionsPerServer = conf.getInt(HBaseTestingUtility.REGIONS_PER_SERVER_KEY,
-      HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER);
+        HBaseTestingUtility.DEFAULT_REGIONS_PER_SERVER);
     int totalNumberOfRegions = numberOfServers * regionsPerServer;
     LOG.info("Number of live regionservers: " + numberOfServers + ", " +
       "pre-splitting table into " + totalNumberOfRegions + " regions " +

View File

@@ -40,9 +40,9 @@ import org.apache.hadoop.hbase.chaos.factories.MonkeyFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
 import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -133,14 +133,15 @@ public class IntegrationTestBigLinkedListWithVisibility extends IntegrationTestB
     if(!acl) {
       LOG.info("No ACL available.");
     }
-    Admin admin = new HBaseAdmin(getConf());
-    for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
-      TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i);
-      createTable(admin, tableName, false, acl);
+    try (Connection conn = ConnectionFactory.createConnection(getConf());
+        Admin admin = conn.getAdmin()) {
+      for (int i = 0; i < DEFAULT_TABLES_COUNT; i++) {
+        TableName tableName = IntegrationTestBigLinkedListWithVisibility.getTableName(i);
+        createTable(admin, tableName, false, acl);
+      }
+      TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
+      createTable(admin, tableName, true, acl);
     }
-    TableName tableName = TableName.valueOf(COMMON_TABLE_NAME);
-    createTable(admin, tableName, true, acl);
-    admin.close();
   }

   private void createTable(Admin admin, TableName tableName, boolean setVersion,

View File

@@ -33,7 +33,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
@@ -370,11 +371,9 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     HTableDescriptor htd = new HTableDescriptor(getTablename());
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    Admin admin = new HBaseAdmin(getConf());
-    try {
+    try (Connection conn = ConnectionFactory.createConnection(getConf());
+        Admin admin = conn.getAdmin()) {
       admin.createTable(htd, Bytes.toBytes(0L), Bytes.toBytes(-1L), numPresplits);
-    } finally {
-      admin.close();
     }

     doLoad(getConf(), htd);
     doVerify(getConf(), htd);
@@ -382,6 +381,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
     return 0;
   }

+  @SuppressWarnings("unchecked")
   @Override
   protected void processOptions(CommandLine cmd) {
     List args = cmd.getArgList();

View File

@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -112,7 +111,7 @@ import java.util.concurrent.TimeUnit;
 @InterfaceStability.Stable
 public class LoadIncrementalHFiles extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(LoadIncrementalHFiles.class);
-  private Admin hbAdmin;
+  private boolean initalized = false;

   public static final String NAME = "completebulkload";
   public static final String MAX_FILES_PER_REGION_PER_FAMILY
@@ -138,18 +137,19 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }

   private void initialize() throws Exception {
-    if (hbAdmin == null) {
-      // make a copy, just to be sure we're not overriding someone else's config
-      setConf(HBaseConfiguration.create(getConf()));
-      Configuration conf = getConf();
-      // disable blockcache for tool invocation, see HBASE-10500
-      conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
-      this.hbAdmin = new HBaseAdmin(conf);
-      this.userProvider = UserProvider.instantiate(conf);
-      this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
-      assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
-      maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
-    }
+    if (initalized) {
+      return;
+    }
+    // make a copy, just to be sure we're not overriding someone else's config
+    setConf(HBaseConfiguration.create(getConf()));
+    Configuration conf = getConf();
+    // disable blockcache for tool invocation, see HBASE-10500
+    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
+    this.userProvider = UserProvider.instantiate(conf);
+    this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
+    assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
+    maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
+    initalized = true;
   }

   private void usage() {
@@ -856,10 +856,6 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     return !HFile.isReservedFileInfoKey(key);
   }

-  private boolean doesTableExist(TableName tableName) throws Exception {
-    return hbAdmin.tableExists(tableName);
-  }
-
   /*
    * Infers region boundaries for a new table.
    * Parameter:
@@ -894,7 +890,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
    * If the table is created for the first time, then "completebulkload" reads the files twice.
    * More modifications necessary if we want to avoid doing it.
    */
-  private void createTable(TableName tableName, String dirPath) throws Exception {
+  private void createTable(TableName tableName, String dirPath, Admin admin) throws Exception {
     final Path hfofDir = new Path(dirPath);
     final FileSystem fs = hfofDir.getFileSystem(getConf());
@@ -942,7 +938,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     });

     byte[][] keys = LoadIncrementalHFiles.inferBoundaries(map);
-    this.hbAdmin.createTable(htd,keys);
+    admin.createTable(htd, keys);

     LOG.info("Table "+ tableName +" is available!!");
   }
@@ -955,26 +951,27 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
     }

     initialize();
-    String dirPath = args[0];
-    TableName tableName = TableName.valueOf(args[1]);
-    boolean tableExists = this.doesTableExist(tableName);
-    if (!tableExists) {
-      if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) {
-        this.createTable(tableName, dirPath);
-      } else {
-        String errorMsg = format("Table '%s' does not exist.", tableName);
-        LOG.error(errorMsg);
-        throw new TableNotFoundException(errorMsg);
-      }
-    }
-    Path hfofDir = new Path(dirPath);
     try (Connection connection = ConnectionFactory.createConnection(getConf());
-        HTable table = (HTable) connection.getTable(tableName);) {
-      doBulkLoad(hfofDir, table);
+        Admin admin = connection.getAdmin()) {
+      String dirPath = args[0];
+      TableName tableName = TableName.valueOf(args[1]);
+      boolean tableExists = admin.tableExists(tableName);
+      if (!tableExists) {
+        if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) {
+          this.createTable(tableName, dirPath, admin);
+        } else {
+          String errorMsg = format("Table '%s' does not exist.", tableName);
+          LOG.error(errorMsg);
+          throw new TableNotFoundException(errorMsg);
+        }
+      }
+      Path hfofDir = new Path(dirPath);
+      try (HTable table = (HTable) connection.getTable(tableName);) {
+        doBulkLoad(hfofDir, table);
+      }
     }

     return 0;
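A side effect visible in the LoadIncrementalHFiles hunks above: the removed hbAdmin field had doubled as the "already initialized" marker (if (hbAdmin == null)), so with no Admin field left, an explicit boolean takes over. A stripped-down sketch of the guard, with a hypothetical configureOnce() standing in for the one-time setup above (the patch itself keeps the spelling "initalized"):

public abstract class OneTimeInitSketch {
  private boolean initalized = false;  // spelling as in the patch

  protected void initialize() {
    if (initalized) {
      return;            // idempotent: repeated calls skip the setup
    }
    configureOnce();     // hypothetical stand-in for the config tweaks above
    initalized = true;   // flipped last, only after setup completed
  }

  protected abstract void configureOnce();
}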

View File

@@ -28,7 +28,6 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
@@ -37,8 +36,10 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.ServerCommandLine;
@@ -249,12 +250,16 @@
   @SuppressWarnings("resource")
   private int stopMaster() {
-    Admin adm = null;
-    try {
-      Configuration conf = getConf();
-      // Don't try more than once
-      conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
-      adm = new HBaseAdmin(getConf());
+    Configuration conf = getConf();
+    // Don't try more than once
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      try (Admin admin = connection.getAdmin()) {
+        admin.shutdown();
+      } catch (Throwable t) {
+        LOG.error("Failed to stop master", t);
+        return 1;
+      }
     } catch (MasterNotRunningException e) {
       LOG.error("Master not running");
       return 1;
@@ -265,12 +270,6 @@
       LOG.error("Got IOException: " +e.getMessage(), e);
       return 1;
     }
-    try {
-      adm.shutdown();
-    } catch (Throwable t) {
-      LOG.error("Failed to stop master", t);
-      return 1;
-    }
     return 0;
   }
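Two details of the stopMaster() rewrite are worth noting: setting HConstants.HBASE_CLIENT_RETRIES_NUMBER to 1 keeps the stop command from hanging through the default retry schedule, and the nested try-with-resources separates connection-level failures (outer catches) from a failed shutdown call (inner catch). A condensed sketch under those assumptions, collapsing the specific catch clauses into IOException:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class StopMasterSketch {
  static int stop() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);  // fail fast, no retries
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      admin.shutdown();  // asks the cluster to shut down
      return 0;
    } catch (IOException e) {
      return 1;  // could not connect, or the shutdown request failed
    }
  }
}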

View File

@ -62,7 +62,6 @@ import com.google.common.collect.Multimap;
import com.google.common.collect.Ordering;
import com.google.common.collect.TreeMultimap;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.RandomStringUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
@@ -97,10 +96,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
@@ -3181,21 +3180,12 @@
   HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
     HTableDescriptor[] htd = new HTableDescriptor[0];
-    Admin admin = null;
-    try {
-      LOG.info("getHTableDescriptors == tableNames => " + tableNames);
-      admin = new HBaseAdmin(getConf());
+    LOG.info("getHTableDescriptors == tableNames => " + tableNames);
+    try (Connection conn = ConnectionFactory.createConnection(getConf());
+        Admin admin = conn.getAdmin()) {
       htd = admin.getTableDescriptorsByTableName(tableNames);
     } catch (IOException e) {
       LOG.debug("Exception getting table descriptors", e);
-    } finally {
-      if (admin != null) {
-        try {
-          admin.close();
-        } catch (IOException e) {
-          LOG.debug("Exception closing HBaseAdmin", e);
-        }
-      }
     }
     return htd;
   }

View File

@@ -26,7 +26,6 @@ import java.util.Random;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -36,11 +35,11 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
@@ -126,13 +125,11 @@ class HMerge {
       throw new IllegalStateException(
           "HBase instance must be running to merge a normal table");
     }
-    Admin admin = new HBaseAdmin(conf);
-    try {
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        Admin admin = conn.getAdmin()) {
       if (!admin.isTableDisabled(tableName)) {
         throw new TableNotDisabledException(tableName);
       }
-    } finally {
-      admin.close();
     }
     new OnlineMerger(conf, fs, tableName).process();
   }

View File

@@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.RegionLocator;
@@ -66,11 +67,9 @@
    */
   @Deprecated
   public RegionSizeCalculator(HTable table) throws IOException {
-    HBaseAdmin admin = new HBaseAdmin(table.getConfiguration());
-    try {
+    try (Connection conn = ConnectionFactory.createConnection(table.getConfiguration());
+        Admin admin = conn.getAdmin()) {
       init(table.getRegionLocator(), admin);
-    } finally {
-      admin.close();
     }
   }

View File

@@ -21,7 +21,9 @@
   import="static org.apache.commons.lang.StringEscapeUtils.escapeXml"
   import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.master.HMaster"
-  import="org.apache.hadoop.hbase.client.HBaseAdmin"
+  import="org.apache.hadoop.hbase.client.Admin"
+  import="org.apache.hadoop.hbase.client.Connection"
+  import="org.apache.hadoop.hbase.client.ConnectionFactory"
   import="org.apache.hadoop.hbase.HTableDescriptor"
   import="org.apache.hadoop.hbase.HBaseConfiguration" %>
<%
@@ -81,7 +83,14 @@
 </div>
 </div>
-<% HTableDescriptor[] tables = new HBaseAdmin(conf).listTables();
+<% HTableDescriptor[] tables;
+   Connection connection = master.getConnection();
+   Admin admin = connection.getAdmin();
+   try {
+     tables = admin.listTables();
+   } finally {
+     admin.close();
+   }
    if(tables != null && tables.length > 0) { %>
 <table class="table table-striped">
   <tr>
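One detail in the JSP hunk above: the page borrows the master's own Connection via master.getConnection(), so it closes only the Admin it opened and leaves the Connection alone. A sketch of that ownership rule, where listTablesWith() and its "shared" parameter are hypothetical names:

import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class BorrowedConnectionSketch {
  // "shared" is owned by the caller (here, the running HMaster);
  // close only what this method opens.
  static HTableDescriptor[] listTablesWith(Connection shared) throws IOException {
    try (Admin admin = shared.getAdmin()) {
      return admin.listTables();
    }
  }
}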

View File

@@ -39,9 +39,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.ScannerCallable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.ipc.AbstractRpcClient;
@@ -61,7 +58,6 @@ import org.junit.experimental.categories.Category;
 @Category({FilterTests.class, MediumTests.class})
 public class FilterTestingCluster {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static Connection connection;
   private static Admin admin = null;
   private static List<TableName> createdTables = new ArrayList<>();
@@ -81,7 +77,7 @@ public class FilterTestingCluster {
   }

   protected static Table openTable(TableName tableName) throws IOException {
-    Table table = connection.getTable(tableName);
+    Table table = TEST_UTIL.getConnection().getTable(tableName);
     assertTrue("Fail to create the table", admin.tableExists(tableName));
     return table;
   }
@@ -105,8 +101,7 @@ public class FilterTestingCluster {
     conf = HBaseConfiguration.create(conf);
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     try {
-      connection = ConnectionFactory.createConnection(conf);
-      admin = connection.getAdmin();
+      admin = TEST_UTIL.getHBaseAdmin();
     } catch (MasterNotRunningException e) {
       assertNull("Master is not running", e);
     } catch (ZooKeeperConnectionException e) {
@@ -128,7 +123,6 @@ public class FilterTestingCluster {
   @AfterClass
   public static void tearDown() throws Exception {
     deleteTables();
-    connection.close();
     TEST_UTIL.shutdownMiniCluster();
   }
 }
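The FilterTestingCluster change is the test-side version of the same cleanup: rather than opening its own Connection, the test borrows the one owned by HBaseTestingUtility's mini-cluster, which is torn down in shutdownMiniCluster(). A sketch of that usage, with an illustrative table name (the utility methods are the ones used in the diffs above):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    try {
      Admin admin = util.getHBaseAdmin();  // cached and owned by the utility
      try (Table table = util.getConnection().getTable(TableName.valueOf("t"))) {
        admin.tableExists(table.getName());
        // The shared connection and the cached admin are not closed here --
        // shutdownMiniCluster() handles both.
      }
    } finally {
      util.shutdownMiniCluster();
    }
  }
}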

View File

@@ -383,11 +383,10 @@ public class TestHFileOutputFormat {
     util = new HBaseTestingUtility();
     Configuration conf = util.getConfiguration();
     byte[][] splitKeys = generateRandomSplitKeys(4);
-    HBaseAdmin admin = null;
     try {
       util.startMiniCluster();
       Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
-      admin = util.getHBaseAdmin();
+      HBaseAdmin admin = util.getHBaseAdmin();
       HTable table = util.createTable(TABLE_NAME, FAMILIES, splitKeys);
       assertEquals("Should start with empty table",
           0, util.countRows(table));
@@ -466,7 +465,6 @@ public class TestHFileOutputFormat {
       assertEquals("Data should remain after reopening of regions",
           tableDigestBefore, util.checksumRows(table));
     } finally {
-      if (admin != null) admin.close();
       util.shutdownMiniMapReduceCluster();
       util.shutdownMiniCluster();
     }
@@ -908,7 +906,7 @@ public class TestHFileOutputFormat {
     try {
       util.startMiniCluster();
       final FileSystem fs = util.getDFSCluster().getFileSystem();
-      HBaseAdmin admin = new HBaseAdmin(conf);
+      HBaseAdmin admin = util.getHBaseAdmin();
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table", 0, util.countRows(table));

View File

@@ -36,12 +36,10 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
@@ -286,15 +284,8 @@ public class TestMasterReplication {
   }

   private void createTableOnClusters(HTableDescriptor table) throws Exception {
-    int numClusters = configurations.length;
-    for (int i = 0; i < numClusters; i++) {
-      Admin hbaseAdmin = null;
-      try {
-        hbaseAdmin = new HBaseAdmin(configurations[i]);
-        hbaseAdmin.createTable(table);
-      } finally {
-        close(hbaseAdmin);
-      }
+    for (HBaseTestingUtility utility : utilities) {
+      utility.getHBaseAdmin().createTable(table);
     }
   }

View File

@@ -43,9 +43,9 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.crypto.Cipher;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
@@ -266,45 +266,48 @@ public class LoadTestTool extends AbstractHBaseTool {
    */
   protected void applyColumnFamilyOptions(TableName tableName,
       byte[][] columnFamilies) throws IOException {
-    Admin admin = new HBaseAdmin(conf);
-    HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
-    LOG.info("Disabling table " + tableName);
-    admin.disableTable(tableName);
-    for (byte[] cf : columnFamilies) {
-      HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
-      boolean isNewCf = columnDesc == null;
-      if (isNewCf) {
-        columnDesc = new HColumnDescriptor(cf);
-      }
-      if (bloomType != null) {
-        columnDesc.setBloomFilterType(bloomType);
-      }
-      if (compressAlgo != null) {
-        columnDesc.setCompressionType(compressAlgo);
-      }
-      if (dataBlockEncodingAlgo != null) {
-        columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
-      }
-      if (inMemoryCF) {
-        columnDesc.setInMemory(inMemoryCF);
-      }
-      if (cipher != null) {
-        byte[] keyBytes = new byte[cipher.getKeyLength()];
-        new SecureRandom().nextBytes(keyBytes);
-        columnDesc.setEncryptionType(cipher.getName());
-        columnDesc.setEncryptionKey(EncryptionUtil.wrapKey(conf,
-          User.getCurrent().getShortName(),
-          new SecretKeySpec(keyBytes, cipher.getName())));
-      }
-      if (isNewCf) {
-        admin.addColumn(tableName, columnDesc);
-      } else {
-        admin.modifyColumn(tableName, columnDesc);
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        Admin admin = conn.getAdmin()) {
+      HTableDescriptor tableDesc = admin.getTableDescriptor(tableName);
+      LOG.info("Disabling table " + tableName);
+      admin.disableTable(tableName);
+      for (byte[] cf : columnFamilies) {
+        HColumnDescriptor columnDesc = tableDesc.getFamily(cf);
+        boolean isNewCf = columnDesc == null;
+        if (isNewCf) {
+          columnDesc = new HColumnDescriptor(cf);
+        }
+        if (bloomType != null) {
+          columnDesc.setBloomFilterType(bloomType);
+        }
+        if (compressAlgo != null) {
+          columnDesc.setCompressionType(compressAlgo);
+        }
+        if (dataBlockEncodingAlgo != null) {
+          columnDesc.setDataBlockEncoding(dataBlockEncodingAlgo);
+        }
+        if (inMemoryCF) {
+          columnDesc.setInMemory(inMemoryCF);
+        }
+        if (cipher != null) {
+          byte[] keyBytes = new byte[cipher.getKeyLength()];
+          new SecureRandom().nextBytes(keyBytes);
+          columnDesc.setEncryptionType(cipher.getName());
+          columnDesc.setEncryptionKey(
+              EncryptionUtil.wrapKey(conf,
+                  User.getCurrent().getShortName(),
+                  new SecretKeySpec(keyBytes,
+                      cipher.getName())));
+        }
+        if (isNewCf) {
+          admin.addColumn(tableName, columnDesc);
+        } else {
+          admin.modifyColumn(tableName, columnDesc);
+        }
       }
+      LOG.info("Enabling table " + tableName);
+      admin.enableTable(tableName);
     }
-    LOG.info("Enabling table " + tableName);
-    admin.enableTable(tableName);
-    admin.close();
   }
@Override