Revert "HBASE-18503 Change ***Util and Master to use TableDescriptor and ColumnFamilyDescriptor"

Wrong author information
This reverts commit b03348630c.
Chia-Ping Tsai 2017-08-24 13:02:11 +08:00
parent b03348630c
commit 12f2b02a80
77 changed files with 985 additions and 950 deletions

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@ -55,7 +56,6 @@ import org.apache.hadoop.hbase.backup.impl.BackupManifest;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@ -139,7 +139,7 @@ public final class BackupUtils {
LOG.warn("Table " + table + " does not exists, skipping it.");
continue;
}
TableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
HTableDescriptor orig = FSTableDescriptors.getTableDescriptorFromFs(fs, rootDir, table);
// write a copy of descriptor to the target directory
Path target = new Path(backupInfo.getTableBackupDir(table));

View File

@ -33,17 +33,16 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.BackupRestoreFactory;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.RestoreJob;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
@ -123,10 +122,10 @@ public class RestoreTool {
}
void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
void modifyTableSync(Connection conn, HTableDescriptor desc) throws IOException {
try (Admin admin = conn.getAdmin();) {
admin.modifyTable(desc);
admin.modifyTable(desc.getTableName(), desc);
int attempt = 0;
int maxAttempts = 600;
while (!admin.isTableAvailable(desc.getTableName())) {
@ -173,30 +172,29 @@ public class RestoreTool {
// adjust table schema
for (int i = 0; i < tableNames.length; i++) {
TableName tableName = tableNames[i];
TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, incrBackupId);
LOG.debug("Found descriptor " + tableDescriptor + " through " + incrBackupId);
TableName newTableName = newTableNames[i];
TableDescriptor newTableDescriptor = admin.listTableDescriptor(newTableName);
List<ColumnFamilyDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
List<ColumnFamilyDescriptor> existingFamilies =
HTableDescriptor newTableDescriptor = new HTableDescriptor(admin.getTableDescriptor(newTableName));
List<HColumnDescriptor> families = Arrays.asList(tableDescriptor.getColumnFamilies());
List<HColumnDescriptor> existingFamilies =
Arrays.asList(newTableDescriptor.getColumnFamilies());
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(newTableDescriptor);
boolean schemaChangeNeeded = false;
for (ColumnFamilyDescriptor family : families) {
for (HColumnDescriptor family : families) {
if (!existingFamilies.contains(family)) {
builder.addColumnFamily(family);
newTableDescriptor.addFamily(family);
schemaChangeNeeded = true;
}
}
for (ColumnFamilyDescriptor family : existingFamilies) {
for (HColumnDescriptor family : existingFamilies) {
if (!families.contains(family)) {
builder.removeColumnFamily(family.getName());
newTableDescriptor.removeFamily(family.getName());
schemaChangeNeeded = true;
}
}
if (schemaChangeNeeded) {
modifyTableSync(conn, builder.build());
modifyTableSync(conn, newTableDescriptor);
LOG.info("Changed " + newTableDescriptor.getTableName() + " to: " + newTableDescriptor);
}
}
@ -255,24 +253,24 @@ public class RestoreTool {
/**
* Get table descriptor
* @param tableName is the table backed up
* @return {@link TableDescriptor} saved in backup image of the table
* @return {@link HTableDescriptor} saved in backup image of the table
*/
TableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
HTableDescriptor getTableDesc(TableName tableName) throws FileNotFoundException, IOException {
Path tableInfoPath = this.getTableInfoPath(tableName);
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, tableInfoPath);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, tableInfoPath, desc);
TableDescriptor tableDescriptor = manifest.getTableDescriptor();
HTableDescriptor tableDescriptor = manifest.getTableDescriptor();
if (!tableDescriptor.getTableName().equals(tableName)) {
LOG.error("couldn't find Table Desc for table: " + tableName + " under tableInfoPath: "
+ tableInfoPath.toString());
LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getTableName().getNameAsString());
LOG.error("tableDescriptor.getNameAsString() = " + tableDescriptor.getNameAsString());
throw new FileNotFoundException("couldn't find Table Desc for table: " + tableName
+ " under tableInfoPath: " + tableInfoPath.toString());
}
return tableDescriptor;
}
private TableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
private HTableDescriptor getTableDescriptor(FileSystem fileSys, TableName tableName,
String lastIncrBackupId) throws IOException {
if (lastIncrBackupId != null) {
String target =
@ -291,7 +289,7 @@ public class RestoreTool {
FileSystem fileSys = tableBackupPath.getFileSystem(this.conf);
// get table descriptor first
TableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
HTableDescriptor tableDescriptor = getTableDescriptor(fileSys, tableName, lastIncrBackupId);
if (tableDescriptor != null) {
LOG.debug("Retrieved descriptor: " + tableDescriptor + " thru " + lastIncrBackupId);
}
@ -327,7 +325,7 @@ public class RestoreTool {
LOG.debug("find table descriptor but no archive dir for table " + tableName
+ ", will only create table");
}
tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
checkAndCreateTable(conn, tableBackupPath, tableName, newTableName, null, tableDescriptor,
truncateIfExists);
return;
@ -338,9 +336,9 @@ public class RestoreTool {
}
if (tableDescriptor == null) {
tableDescriptor = TableDescriptorBuilder.newBuilder(newTableName).build();
tableDescriptor = new HTableDescriptor(newTableName);
} else {
tableDescriptor = TableDescriptorBuilder.copy(newTableName, tableDescriptor);
tableDescriptor = new HTableDescriptor(newTableName, tableDescriptor);
}
// record all region dirs:
@ -472,7 +470,7 @@ public class RestoreTool {
* @throws IOException exception
*/
private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
TableName targetTableName, ArrayList<Path> regionDirList, HTableDescriptor htd,
boolean truncateIfExists) throws IOException {
try (Admin admin = conn.getAdmin();) {
boolean createNew = false;

View File

@ -639,10 +639,13 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor>
if (this == obj) {
return true;
}
if (obj instanceof HColumnDescriptor) {
return delegatee.equals(((HColumnDescriptor) obj).delegatee);
if (obj == null) {
return false;
}
return false;
if (!(obj instanceof HColumnDescriptor)) {
return false;
}
return compareTo((HColumnDescriptor)obj) == 0;
}
/**
@ -655,7 +658,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HCo
@Override
public int compareTo(HColumnDescriptor other) {
return COMPARATOR.compare(this, other);
return delegatee.compareTo(other.delegatee);
}
/**

View File

@ -495,10 +495,13 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor>
if (this == obj) {
return true;
}
if (obj instanceof HTableDescriptor) {
return delegatee.equals(((HTableDescriptor) obj).delegatee);
if (obj == null) {
return false;
}
return false;
if (!(obj instanceof HTableDescriptor)) {
return false;
}
return compareTo((HTableDescriptor)obj) == 0;
}
/**
@ -520,7 +523,7 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescr
*/
@Override
public int compareTo(final HTableDescriptor other) {
return TableDescriptor.COMPARATOR.compare(this, other);
return delegatee.compareTo(other.delegatee);
}
/**

View File

@ -1160,10 +1160,13 @@ public class ColumnFamilyDescriptorBuilder {
if (this == obj) {
return true;
}
if (obj instanceof ModifyableColumnFamilyDescriptor) {
return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0;
if (obj == null) {
return false;
}
return false;
if (!(obj instanceof ModifyableColumnFamilyDescriptor)) {
return false;
}
return compareTo((ModifyableColumnFamilyDescriptor) obj) == 0;
}
@Override
@ -1185,7 +1188,7 @@ public class ColumnFamilyDescriptorBuilder {
* @see #parseFrom(byte[])
*/
private byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this)
return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToColumnFamilySchema(this)
.toByteArray());
}
@ -1210,7 +1213,7 @@ public class ColumnFamilyDescriptorBuilder {
} catch (IOException e) {
throw new DeserializationException(e);
}
return ProtobufUtil.toColumnFamilyDescriptor(cfs);
return ProtobufUtil.convertToColumnDesc(cfs);
}
@Override

View File

@ -378,7 +378,7 @@ public class HBaseAdmin implements Admin {
.setNamespaceName(Bytes.toString(name)).build())
.getTableSchemaList()
.stream()
.map(ProtobufUtil::toTableDescriptor)
.map(ProtobufUtil::convertToTableDesc)
.collect(Collectors.toList());
}
});
@ -459,8 +459,8 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables);
return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(getRpcController(),
req));
}
});
}
@ -525,7 +525,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0));
return ProtobufUtil.convertToTableDesc(htds.getTableSchemaList().get(0));
}
return null;
}
@ -554,7 +554,7 @@ public class HBaseAdmin implements Admin {
RequestConverter.buildGetTableDescriptorsRequest(tableName);
GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
if (!htds.getTableSchemaList().isEmpty()) {
return new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
return ProtobufUtil.convertToHTableDesc(htds.getTableSchemaList().get(0));
}
return null;
}
@ -2300,7 +2300,7 @@ public class HBaseAdmin implements Admin {
.build()).getTableSchemaList();
HTableDescriptor[] res = new HTableDescriptor[list.size()];
for(int i=0; i < list.size(); i++) {
res[i] = new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(list.get(i)));
res[i] = new ImmutableHTableDescriptor(ProtobufUtil.convertToHTableDesc(list.get(i)));
}
return res;
}
@ -2419,14 +2419,33 @@ public class HBaseAdmin implements Admin {
protected HTableDescriptor[] rpcCall() throws Exception {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req))
.stream()
.map(ImmutableHTableDescriptor::new)
.toArray(HTableDescriptor[]::new);
return ProtobufUtil.
getHTableDescriptorArray(master.getTableDescriptors(getRpcController(), req));
}
});
}
/**
* Get tableDescriptor
* @param tableName one table name
* @return HTD the HTableDescriptor or null if the table does not exist
* @throws IOException if a remote or network exception occurs
*/
private HTableDescriptor getTableDescriptorByTableName(TableName tableName)
throws IOException {
List<TableName> tableNames = new ArrayList<>(1);
tableNames.add(tableName);
HTableDescriptor[] htdl = getTableDescriptorsByTableName(tableNames);
if (htdl == null || htdl.length == 0) {
return null;
}
else {
return htdl[0];
}
}
@Override
public HTableDescriptor[] getTableDescriptors(List<String> names)
throws IOException {
@ -3690,7 +3709,7 @@ public class HBaseAdmin implements Admin {
* @return the table descriptor
*/
protected TableDescriptor getTableDescriptor() throws IOException {
return getAdmin().listTableDescriptor(getTableName());
return getAdmin().getTableDescriptorByTableName(getTableName());
}
/**

View File

@ -453,7 +453,7 @@ public class RawAsyncHBaseAdmin implements AsyncAdmin {
return;
}
if (!tableSchemas.isEmpty()) {
future.complete(ProtobufUtil.toTableDescriptor(tableSchemas.get(0)));
future.complete(ProtobufUtil.convertToTableDesc(tableSchemas.get(0)));
} else {
future.completeExceptionally(new TableNotFoundException(tableName.getNameAsString()));
}

View File

@ -38,7 +38,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
@ -253,14 +252,10 @@ public class TableDescriptorBuilder {
return new TableDescriptorBuilder(name);
}
public static TableDescriptor copy(TableDescriptor desc) {
public static TableDescriptor copy(TableDescriptor desc) throws DeserializationException {
return new ModifyableTableDescriptor(desc);
}
public static TableDescriptor copy(TableName name, TableDescriptor desc) {
return new ModifyableTableDescriptor(name, desc);
}
/**
* Copy all configuration, values, families, and name from the input.
* @param desc The descriptor to copy
@ -1017,10 +1012,13 @@ public class TableDescriptorBuilder {
if (this == obj) {
return true;
}
if (obj instanceof ModifyableTableDescriptor) {
return TableDescriptor.COMPARATOR.compare(this, (ModifyableTableDescriptor) obj) == 0;
if (obj == null) {
return false;
}
return false;
if (!(obj instanceof ModifyableTableDescriptor)) {
return false;
}
return compareTo((ModifyableTableDescriptor) obj) == 0;
}
/**
@ -1397,7 +1395,7 @@ public class TableDescriptorBuilder {
* @return the bytes in pb format
*/
private byte[] toByteArray() {
return ProtobufUtil.prependPBMagic(ProtobufUtil.toTableSchema(this).toByteArray());
return ProtobufUtil.prependPBMagic(ProtobufUtil.convertToTableSchema(this).toByteArray());
}
/**
@ -1417,7 +1415,7 @@ public class TableDescriptorBuilder {
HBaseProtos.TableSchema.Builder builder = HBaseProtos.TableSchema.newBuilder();
try {
ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
return ProtobufUtil.toTableDescriptor(builder.build());
return ProtobufUtil.convertToTableDesc(builder.build());
} catch (IOException e) {
throw new DeserializationException(e);
}

View File

@ -17,12 +17,15 @@
*/
package org.apache.hadoop.hbase.shaded.protobuf;
import java.awt.image.BandCombineOp;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Method;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
@ -47,8 +50,10 @@ import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@ -69,6 +74,7 @@ import org.apache.hadoop.hbase.client.Cursor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.PackagePrivateFieldAccessor;
@ -96,6 +102,7 @@ import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.quotas.ThrottleType;
import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.visibility.Authorizations;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
@ -175,6 +182,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DynamicClassLoader;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
import org.apache.hadoop.hbase.util.Methods;
@ -416,6 +424,24 @@ public final class ProtobufUtil {
.collect(Collectors.toList());
}
/**
* Get HTableDescriptor[] from GetTableDescriptorsResponse protobuf
*
* @param proto the GetTableDescriptorsResponse
* @return an immutable HTableDescriptor array
* @deprecated Use {@link #toTableDescriptorList} after removing the HTableDescriptor
*/
@Deprecated
public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
if (proto == null) return null;
HTableDescriptor[] ret = new HTableDescriptor[proto.getTableSchemaCount()];
for (int i = 0; i < proto.getTableSchemaCount(); ++i) {
ret[i] = new ImmutableHTableDescriptor(convertToHTableDesc(proto.getTableSchema(i)));
}
return ret;
}
/**
* Get a list of TableDescriptor from GetTableDescriptorsResponse protobuf
*
@ -424,7 +450,7 @@ public final class ProtobufUtil {
*/
public static List<TableDescriptor> toTableDescriptorList(GetTableDescriptorsResponse proto) {
if (proto == null) return new ArrayList<>();
return proto.getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor)
return proto.getTableSchemaList().stream().map(ProtobufUtil::convertToTableDesc)
.collect(Collectors.toList());
}
@ -2815,11 +2841,11 @@ public final class ProtobufUtil {
}
/**
* Converts a ColumnFamilyDescriptor to ColumnFamilySchema
* @param hcd the ColumnFamilyDescriptor
* Converts an HColumnDescriptor to ColumnFamilySchema
* @param hcd the HColumnDescriptor
* @return Convert this instance to the pb column family type
*/
public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd) {
public static ColumnFamilySchema convertToColumnFamilySchema(ColumnFamilyDescriptor hcd) {
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
builder.setName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
for (Map.Entry<Bytes, Bytes> e : hcd.getValues().entrySet()) {
@ -2838,11 +2864,31 @@ public final class ProtobufUtil {
}
/**
* Converts a ColumnFamilySchema to ColumnFamilyDescriptor
* Converts a ColumnFamilySchema to HColumnDescriptor
* @param cfs the ColumnFamilySchema
* @return A {@link ColumnFamilyDescriptor} made from the passed in <code>cfs</code>
* @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
*/
public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) {
@Deprecated
public static HColumnDescriptor convertToHColumnDesc(final ColumnFamilySchema cfs) {
// Use the empty constructor so we preserve the initial values set on construction for things
// like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
// unrelated-looking test failures that are hard to trace back to here.
HColumnDescriptor hcd = new HColumnDescriptor(cfs.getName().toByteArray());
for (BytesBytesPair a: cfs.getAttributesList()) {
hcd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
}
for (NameStringPair a: cfs.getConfigurationList()) {
hcd.setConfiguration(a.getName(), a.getValue());
}
return hcd;
}
/**
* Converts a ColumnFamilySchema to HColumnDescriptor
* @param cfs the ColumnFamilySchema
* @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
*/
public static ColumnFamilyDescriptor convertToColumnDesc(final ColumnFamilySchema cfs) {
// Use the empty constructor so we preserve the initial values set on construction for things
// like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
// unrelated-looking test failures that are hard to trace back to here.
@ -2854,11 +2900,11 @@ public final class ProtobufUtil {
}
/**
* Converts a TableDescriptor to TableSchema
* @param htd the TableDescriptor
* @return Convert the current {@link TableDescriptor} into a pb TableSchema instance.
* Converts an HTableDescriptor to TableSchema
* @param htd the HTableDescriptor
* @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
*/
public static TableSchema toTableSchema(TableDescriptor htd) {
public static TableSchema convertToTableSchema(TableDescriptor htd) {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setTableName(toProtoTableName(htd.getTableName()));
for (Map.Entry<Bytes, Bytes> e : htd.getValues().entrySet()) {
@ -2868,7 +2914,7 @@ public final class ProtobufUtil {
builder.addAttributes(aBuilder.build());
}
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
builder.addColumnFamilies(toColumnFamilySchema(hcd));
builder.addColumnFamilies(convertToColumnFamilySchema(hcd));
}
for (Map.Entry<String, String> e : htd.getConfiguration().entrySet()) {
NameStringPair.Builder aBuilder = NameStringPair.newBuilder();
@ -2879,17 +2925,44 @@ public final class ProtobufUtil {
return builder.build();
}
/**
* Converts a TableSchema to HTableDescriptor
* @param ts A pb TableSchema instance.
* @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
* @deprecated Use {@link #convertToTableDesc} after removing the HTableDescriptor
*/
@Deprecated
public static HTableDescriptor convertToHTableDesc(final TableSchema ts) {
List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
int index = 0;
for (ColumnFamilySchema cfs: list) {
hcds[index++] = ProtobufUtil.convertToHColumnDesc(cfs);
}
HTableDescriptor htd = new HTableDescriptor(ProtobufUtil.toTableName(ts.getTableName()));
for (HColumnDescriptor hcd : hcds) {
htd.addFamily(hcd);
}
for (BytesBytesPair a: ts.getAttributesList()) {
htd.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray());
}
for (NameStringPair a: ts.getConfigurationList()) {
htd.setConfiguration(a.getName(), a.getValue());
}
return htd;
}
/**
* Converts a TableSchema to TableDescriptor
* @param ts A pb TableSchema instance.
* @return A {@link TableDescriptor} made from the passed in pb <code>ts</code>.
*/
public static TableDescriptor toTableDescriptor(final TableSchema ts) {
public static TableDescriptor convertToTableDesc(final TableSchema ts) {
TableDescriptorBuilder builder
= TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName()));
ts.getColumnFamiliesList()
.stream()
.map(ProtobufUtil::toColumnFamilyDescriptor)
.map(ProtobufUtil::convertToColumnDesc)
.forEach(builder::addColumnFamily);
ts.getAttributesList()
.forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray()));

View File

@ -1080,7 +1080,7 @@ public final class RequestConverter {
final long nonce) {
AddColumnRequest.Builder builder = AddColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@ -1120,7 +1120,7 @@ public final class RequestConverter {
final long nonce) {
ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column));
builder.setColumnFamilies(ProtobufUtil.convertToColumnFamilySchema(column));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();
@ -1306,28 +1306,28 @@ public final class RequestConverter {
/**
* Creates a protocol buffer CreateTableRequest
*
* @param tableDescriptor
* @param hTableDesc
* @param splitKeys
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(
final TableDescriptor tableDescriptor,
final TableDescriptor hTableDesc,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) {
return buildCreateTableRequest(tableDescriptor, Optional.ofNullable(splitKeys), nonceGroup, nonce);
return buildCreateTableRequest(hTableDesc, Optional.ofNullable(splitKeys), nonceGroup, nonce);
}
/**
* Creates a protocol buffer CreateTableRequest
* @param tableDescriptor
* @param hTableDesc
* @param splitKeys
* @return a CreateTableRequest
*/
public static CreateTableRequest buildCreateTableRequest(TableDescriptor tableDescriptor,
public static CreateTableRequest buildCreateTableRequest(TableDescriptor hTableDesc,
Optional<byte[][]> splitKeys, long nonceGroup, long nonce) {
CreateTableRequest.Builder builder = CreateTableRequest.newBuilder();
builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
builder.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDesc));
splitKeys.ifPresent(keys -> Arrays.stream(keys).forEach(
key -> builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key))));
builder.setNonceGroup(nonceGroup);
@ -1349,7 +1349,7 @@ public final class RequestConverter {
final long nonce) {
ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder();
builder.setTableName(ProtobufUtil.toProtoTableName((tableName)));
builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc));
builder.setTableSchema(ProtobufUtil.convertToTableSchema(tableDesc));
builder.setNonceGroup(nonceGroup);
builder.setNonce(nonce);
return builder.build();

View File

@ -21,7 +21,6 @@ import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
/**
* Get, remove and modify table descriptors.
@ -34,7 +33,7 @@ public interface TableDescriptors {
* @return TableDescriptor for tablename
* @throws IOException
*/
TableDescriptor get(final TableName tableName)
HTableDescriptor get(final TableName tableName)
throws IOException;
/**
@ -42,7 +41,16 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getByNamespace(String name)
Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException;
/**
* Get Map of all HTableDescriptors. Populates the descriptor cache as a
* side effect.
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, HTableDescriptor> getAll()
throws IOException;
/**
@ -51,16 +59,7 @@ public interface TableDescriptors {
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getAll()
throws IOException;
/**
* Get Map of all TableDescriptors. Populates the descriptor cache as a
* side effect.
* @return Map of all descriptors.
* @throws IOException
*/
Map<String, TableDescriptor> getAllDescriptors()
Map<String, HTableDescriptor> getAllDescriptors()
throws IOException;
/**
@ -68,7 +67,7 @@ public interface TableDescriptors {
* @param htd Descriptor to set into TableDescriptors
* @throws IOException
*/
void add(final TableDescriptor htd)
void add(final HTableDescriptor htd)
throws IOException;
/**
@ -76,7 +75,7 @@ public interface TableDescriptors {
* @return Instance of table descriptor or null if none found.
* @throws IOException
*/
TableDescriptor remove(final TableName tablename)
HTableDescriptor remove(final TableName tablename)
throws IOException;
/**

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -49,7 +50,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
List<Cell> values;
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
Path rootDir, TableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
throws IOException {
// region is immutable, set isolation level
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.util.FSUtils;
@ -74,7 +75,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
private Path restoreDir;
private Scan scan;
private ArrayList<HRegionInfo> regions;
private TableDescriptor htd;
private HTableDescriptor htd;
private ClientSideRegionScanner currentRegionScanner = null;
private int currentRegion = -1;

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.hbase.mapreduce;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -29,6 +28,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HDFSBlocksDistribution.HostAndWeight;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.ClientSideRegionScanner;
@ -81,7 +81,7 @@ public class TableSnapshotInputFormatImpl {
*/
public static class InputSplit implements Writable {
private TableDescriptor htd;
private HTableDescriptor htd;
private HRegionInfo regionInfo;
private String[] locations;
private String scan;
@ -90,7 +90,7 @@ public class TableSnapshotInputFormatImpl {
// constructor for mapreduce framework / Writable
public InputSplit() {}
public InputSplit(TableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
public InputSplit(HTableDescriptor htd, HRegionInfo regionInfo, List<String> locations,
Scan scan, Path restoreDir) {
this.htd = htd;
this.regionInfo = regionInfo;
@ -108,7 +108,7 @@ public class TableSnapshotInputFormatImpl {
this.restoreDir = restoreDir.toString();
}
public TableDescriptor getHtd() {
public HTableDescriptor getHtd() {
return htd;
}
@ -129,7 +129,7 @@ public class TableSnapshotInputFormatImpl {
return locations;
}
public TableDescriptor getTableDescriptor() {
public HTableDescriptor getTableDescriptor() {
return htd;
}
@ -142,7 +142,7 @@ public class TableSnapshotInputFormatImpl {
@Override
public void write(DataOutput out) throws IOException {
TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
.setTable(ProtobufUtil.toTableSchema(htd))
.setTable(ProtobufUtil.convertToTableSchema(htd))
.setRegion(HRegionInfo.convert(regionInfo));
for (String location : locations) {
@ -169,7 +169,7 @@ public class TableSnapshotInputFormatImpl {
byte[] buf = new byte[len];
in.readFully(buf);
TableSnapshotRegionSplit split = TableSnapshotRegionSplit.PARSER.parseFrom(buf);
this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
this.htd = ProtobufUtil.convertToHTableDesc(split.getTable());
this.regionInfo = HRegionInfo.convert(split.getRegion());
List<String> locationsList = split.getLocationsList();
this.locations = locationsList.toArray(new String[locationsList.size()]);
@ -196,7 +196,7 @@ public class TableSnapshotInputFormatImpl {
public void initialize(InputSplit split, Configuration conf) throws IOException {
this.scan = TableMapReduceUtil.convertStringToScan(split.getScan());
this.split = split;
TableDescriptor htd = split.htd;
HTableDescriptor htd = split.htd;
HRegionInfo hri = this.split.getRegionInfo();
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
@ -311,7 +311,7 @@ public class TableSnapshotInputFormatImpl {
public static List<InputSplit> getSplits(Scan scan, SnapshotManifest manifest,
List<HRegionInfo> regionManifests, Path restoreDir, Configuration conf) throws IOException {
// load table descriptor
TableDescriptor htd = manifest.getTableDescriptor();
HTableDescriptor htd = manifest.getTableDescriptor();
Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());

View File

@ -31,20 +31,21 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.assignment.GCMergedRegionsProcedure;
import org.apache.hadoop.hbase.master.assignment.GCRegionProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
@ -205,7 +206,7 @@ public class CatalogJanitor extends ScheduledChore {
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = FSUtils.getTableDir(rootdir, mergedRegion.getTable());
TableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
HTableDescriptor htd = getTableDescriptor(mergedRegion.getTable());
HRegionFileSystem regionFs = null;
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
@ -413,12 +414,12 @@ public class CatalogJanitor extends ScheduledChore {
}
boolean references = false;
TableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTable());
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
this.services.getConfiguration(), fs, tabledir, daughter, true);
for (ColumnFamilyDescriptor family: parentDescriptor.getColumnFamilies()) {
for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
if ((references = regionFs.hasReferences(family.getNameAsString()))) {
break;
}
@ -431,7 +432,7 @@ public class CatalogJanitor extends ScheduledChore {
return new Pair<>(Boolean.TRUE, Boolean.valueOf(references));
}
private TableDescriptor getTableDescriptor(final TableName tableName)
private HTableDescriptor getTableDescriptor(final TableName tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(tableName);
}

View File

@ -23,11 +23,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
@ -61,9 +61,9 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
Map<String, TableDescriptor> map = htds.getAll();
for (TableDescriptor htd : map.values()) {
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
Map<String, HTableDescriptor> map = htds.getAll();
for (HTableDescriptor htd : map.values()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
// clean only for mob-enabled column.
// obtain a read table lock before cleaning, synchronize with MobFileCompactionChore.

View File

@ -41,7 +41,6 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.regex.Pattern;
import javax.servlet.ServletException;
@ -61,8 +60,10 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
@ -76,12 +77,9 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.coprocessor.BypassCoprocessorException;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@ -592,9 +590,11 @@ public class HMaster extends HRegionServer implements MasterServices {
return connector.getLocalPort();
}
protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
return builder -> builder.setRegionReplication(conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
@Override
protected TableDescriptors getFsTableDescriptors() throws IOException {
return super.getFsTableDescriptors();
}
/**
* For compatibility, if failed with regionserver credentials, try the master one
*/
@ -761,7 +761,9 @@ public class HMaster extends HRegionServer implements MasterServices {
// enable table descriptors cache
this.tableDescriptors.setCacheOn();
// set the META's descriptor to the correct replication
this.tableDescriptors.get(TableName.META_TABLE_NAME).setRegionReplication(
conf.getInt(HConstants.META_REPLICAS_NUM, HConstants.DEFAULT_META_REPLICA_NUM));
// warm-up HTDs cache on master initialization
if (preLoadTableDescriptors) {
status.setStatus("Pre-loading table descriptors");
@ -1499,7 +1501,7 @@ public class HMaster extends HRegionServer implements MasterServices {
return false;
}
TableDescriptor tblDesc = getTableDescriptors().get(table);
HTableDescriptor tblDesc = getTableDescriptors().get(table);
if (table.isSystemTable() || (tblDesc != null &&
!tblDesc.isNormalizationEnabled())) {
LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
@ -1710,34 +1712,34 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long createTable(
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final byte [][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
checkInitialized();
String namespace = tableDescriptor.getTableName().getNamespaceAsString();
String namespace = hTableDescriptor.getTableName().getNamespaceAsString();
this.clusterSchemaService.getNamespace(namespace);
HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, splitKeys);
sanityCheckTableDescriptor(tableDescriptor);
HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, splitKeys);
sanityCheckTableDescriptor(hTableDescriptor);
return MasterProcedureUtil.submitProcedure(
new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions);
getMaster().getMasterCoprocessorHost().preCreateTable(hTableDescriptor, newRegions);
LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
// TODO: We can handle/merge duplicate requests, and differentiate the case of
// TableExistsException by saying if the schema is the same or not.
ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch();
submitProcedure(new CreateTableProcedure(
procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch));
procedureExecutor.getEnvironment(), hTableDescriptor, newRegions, latch));
latch.await();
getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions);
getMaster().getMasterCoprocessorHost().postCreateTable(hTableDescriptor, newRegions);
}
@Override
@ -1748,25 +1750,25 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
if (isStopped()) {
throw new MasterNotRunningException();
}
TableName tableName = tableDescriptor.getTableName();
TableName tableName = hTableDescriptor.getTableName();
if (!(tableName.isSystemTable())) {
throw new IllegalArgumentException(
"Only system table creation can use this createSystemTable API");
}
HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(tableDescriptor, null);
HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null);
LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
LOG.info(getClientIdAuditPrefix() + " create " + hTableDescriptor);
// This special create table is called locally to master. Therefore, no RPC means no need
// to use nonce to detect duplicated RPC call.
long procId = this.procedureExecutor.submitProcedure(
new CreateTableProcedure(procedureExecutor.getEnvironment(), tableDescriptor, newRegions));
new CreateTableProcedure(procedureExecutor.getEnvironment(), hTableDescriptor, newRegions));
return procId;
}
@ -1776,7 +1778,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* values (compression, etc) work. Throws an exception if something is wrong.
* @throws IOException
*/
private void sanityCheckTableDescriptor(final TableDescriptor htd) throws IOException {
private void sanityCheckTableDescriptor(final HTableDescriptor htd) throws IOException {
final String CONF_KEY = "hbase.table.sanity.checks";
boolean logWarn = false;
if (!conf.getBoolean(CONF_KEY, true)) {
@ -1846,7 +1848,7 @@ public class HMaster extends HRegionServer implements MasterServices {
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
}
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (hcd.getTimeToLive() <= 0) {
String message = "TTL for column family " + hcd.getNameAsString() + " must be positive.";
warnOrThrowExceptionForFailure(logWarn, CONF_KEY, message, null);
@ -1867,7 +1869,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
// max versions already being checked
// HBASE-13776 Setting illegal versions for ColumnFamilyDescriptor
// HBASE-13776 Setting illegal versions for HColumnDescriptor
// does not throw IllegalArgumentException
// check minVersions <= maxVersions
if (hcd.getMinVersions() > hcd.getMaxVersions()) {
@ -1891,7 +1893,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
private void checkReplicationScope(ColumnFamilyDescriptor hcd) throws IOException{
private void checkReplicationScope(HColumnDescriptor hcd) throws IOException{
// check replication scope
WALProtos.ScopeType scop = WALProtos.ScopeType.valueOf(hcd.getScope());
if (scop == null) {
@ -1903,7 +1905,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
}
private void checkCompactionPolicy(Configuration conf, TableDescriptor htd)
private void checkCompactionPolicy(Configuration conf, HTableDescriptor htd)
throws IOException {
// FIFO compaction has some requirements
// Actually FCP ignores periodic major compactions
@ -1923,7 +1925,7 @@ public class HMaster extends HRegionServer implements MasterServices {
blockingFileCount = conf.getInt(HStore.BLOCKING_STOREFILES_KEY, blockingFileCount);
}
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
String compactionPolicy =
hcd.getConfigurationValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY);
if (compactionPolicy == null) {
@ -1936,7 +1938,7 @@ public class HMaster extends HRegionServer implements MasterServices {
String message = null;
// 1. Check TTL
if (hcd.getTimeToLive() == ColumnFamilyDescriptorBuilder.DEFAULT_TTL) {
if (hcd.getTimeToLive() == HColumnDescriptor.DEFAULT_TTL) {
message = "Default TTL is not supported for FIFO compaction";
throw new IOException(message);
}
@ -2038,36 +2040,36 @@ public class HMaster extends HRegionServer implements MasterServices {
}, getServerName().toShortString() + ".masterManager"));
}
private void checkCompression(final TableDescriptor htd)
private void checkCompression(final HTableDescriptor htd)
throws IOException {
if (!this.masterCheckCompression) return;
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
checkCompression(hcd);
}
}
private void checkCompression(final ColumnFamilyDescriptor hcd)
private void checkCompression(final HColumnDescriptor hcd)
throws IOException {
if (!this.masterCheckCompression) return;
CompressionTest.testCompression(hcd.getCompressionType());
CompressionTest.testCompression(hcd.getCompactionCompressionType());
}
private void checkEncryption(final Configuration conf, final TableDescriptor htd)
private void checkEncryption(final Configuration conf, final HTableDescriptor htd)
throws IOException {
if (!this.masterCheckEncryption) return;
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
checkEncryption(conf, hcd);
}
}
private void checkEncryption(final Configuration conf, final ColumnFamilyDescriptor hcd)
private void checkEncryption(final Configuration conf, final HColumnDescriptor hcd)
throws IOException {
if (!this.masterCheckEncryption) return;
EncryptionTest.testEncryption(conf, hcd.getEncryptionType(), hcd.getEncryptionKey());
}
private void checkClassLoading(final Configuration conf, final TableDescriptor htd)
private void checkClassLoading(final Configuration conf, final HTableDescriptor htd)
throws IOException {
RegionSplitPolicy.getSplitPolicyClass(htd, conf);
RegionCoprocessorHost.testTableCoprocessorAttrs(conf, htd);
@ -2141,7 +2143,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long addColumn(
final TableName tableName,
final ColumnFamilyDescriptor columnDescriptor,
final HColumnDescriptor columnDescriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@ -2177,7 +2179,7 @@ public class HMaster extends HRegionServer implements MasterServices {
@Override
public long modifyColumn(
final TableName tableName,
final ColumnFamilyDescriptor descriptor,
final HColumnDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException {
@ -2371,7 +2373,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
@Override
public long modifyTable(final TableName tableName, final TableDescriptor descriptor,
public long modifyTable(final TableName tableName, final HTableDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
checkInitialized();
sanityCheckTableDescriptor(descriptor);
@ -3125,7 +3127,7 @@ public class HMaster extends HRegionServer implements MasterServices {
throws IOException {
if (tableNameList == null || tableNameList.isEmpty()) {
// request for all TableDescriptors
Collection<TableDescriptor> allHtds;
Collection<HTableDescriptor> allHtds;
if (namespace != null && namespace.length() > 0) {
// Do a check on the namespace existence. Will fail if it does not exist.
this.clusterSchemaService.getNamespace(namespace);
@ -3133,7 +3135,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
allHtds = tableDescriptors.getAll().values();
}
for (TableDescriptor desc: allHtds) {
for (HTableDescriptor desc: allHtds) {
if (tableStateManager.isTablePresent(desc.getTableName())
&& (includeSysTables || !desc.getTableName().isSystemTable())) {
htds.add(desc);
@ -3142,7 +3144,7 @@ public class HMaster extends HRegionServer implements MasterServices {
} else {
for (TableName s: tableNameList) {
if (tableStateManager.isTablePresent(s)) {
TableDescriptor desc = tableDescriptors.get(s);
HTableDescriptor desc = tableDescriptors.get(s);
if (desc != null) {
htds.add(desc);
}
@ -3247,7 +3249,7 @@ public class HMaster extends HRegionServer implements MasterServices {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(TableName tableName,
List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
mobCompactThread.requestMobCompaction(conf, fs, tableName, columns, allFiles);
}

View File

@ -28,15 +28,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.ClusterId;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
@ -389,8 +387,10 @@ public class MasterFileSystem {
// not make it in first place. Turn off block caching for bootstrap.
// Enable after.
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
HRegion meta = HRegion.createHRegion(metaHRI, rd, c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
setInfoFamilyCachingForMeta(metaDescriptor, false);
HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
setInfoFamilyCachingForMeta(metaDescriptor, true);
meta.close();
} catch (IOException e) {
e = e instanceof RemoteException ?
@ -403,17 +403,13 @@ public class MasterFileSystem {
/**
* Enable in memory caching for hbase:meta
*/
public static TableDescriptor setInfoFamilyCachingForMeta(TableDescriptor metaDescriptor, final boolean b) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(metaDescriptor);
for (ColumnFamilyDescriptor hcd: metaDescriptor.getColumnFamilies()) {
public static void setInfoFamilyCachingForMeta(HTableDescriptor metaDescriptor, final boolean b) {
for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
builder.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(hcd)
.setBlockCacheEnabled(b)
.setInMemory(b)
.build());
hcd.setBlockCacheEnabled(b);
hcd.setInMemory(b);
}
}
return builder.build();
}
public void deleteFamilyFromFS(HRegionInfo region, byte[] familyName)

View File

@ -31,9 +31,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
@ -79,7 +79,7 @@ public class MasterMobCompactionThread {
* @param allFiles Whether to add all mob files into the compaction.
*/
public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
List<ColumnFamilyDescriptor> columns, boolean allFiles) throws IOException {
List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
master.reportMobCompactionStart(tableName);
try {
masterMobPool.execute(new CompactionRunner(fs, tableName, columns,
@ -102,11 +102,11 @@ public class MasterMobCompactionThread {
private class CompactionRunner implements Runnable {
private FileSystem fs;
private TableName tableName;
private List<ColumnFamilyDescriptor> hcds;
private List<HColumnDescriptor> hcds;
private boolean allFiles;
private ExecutorService pool;
public CompactionRunner(FileSystem fs, TableName tableName, List<ColumnFamilyDescriptor> hcds,
public CompactionRunner(FileSystem fs, TableName tableName, List<HColumnDescriptor> hcds,
boolean allFiles, ExecutorService pool) {
super();
this.fs = fs;
@ -123,7 +123,7 @@ public class MasterMobCompactionThread {
MobUtils.getTableLockName(tableName), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
try {
for (ColumnFamilyDescriptor hcd : hcds) {
for (HColumnDescriptor hcd : hcds) {
MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock);
}
} catch (IOException e) {


@ -31,8 +31,10 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
@ -41,7 +43,6 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
@ -362,7 +363,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.addColumn(
ProtobufUtil.toTableName(req.getTableName()),
ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@ -438,11 +439,11 @@ public class MasterRpcServices extends RSRpcServices
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException {
TableDescriptor tableDescriptor = ProtobufUtil.toTableDescriptor(req.getTableSchema());
HTableDescriptor hTableDescriptor = ProtobufUtil.convertToHTableDesc(req.getTableSchema());
byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
try {
long procId =
master.createTable(tableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
master.createTable(hTableDescriptor, splitKeys, req.getNonceGroup(), req.getNonce());
return CreateTableResponse.newBuilder().setProcId(procId).build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
@ -864,7 +865,7 @@ public class MasterRpcServices extends RSRpcServices
if (descriptors != null && descriptors.size() > 0) {
// Add the table descriptors to the response
for (TableDescriptor htd: descriptors) {
builder.addTableSchema(ProtobufUtil.toTableSchema(htd));
builder.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
}
}
return builder.build();
@ -1117,7 +1118,7 @@ public class MasterRpcServices extends RSRpcServices
ListTableDescriptorsByNamespaceResponse.newBuilder();
for (TableDescriptor htd : master
.listTableDescriptorsByNamespace(request.getNamespaceName())) {
b.addTableSchema(ProtobufUtil.toTableSchema(htd));
b.addTableSchema(ProtobufUtil.convertToTableSchema(htd));
}
return b.build();
} catch (IOException e) {
@ -1146,7 +1147,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyColumn(
ProtobufUtil.toTableName(req.getTableName()),
ProtobufUtil.toColumnFamilyDescriptor(req.getColumnFamilies()),
ProtobufUtil.convertToHColumnDesc(req.getColumnFamilies()),
req.getNonceGroup(),
req.getNonce());
if (procId == -1) {
@ -1180,7 +1181,7 @@ public class MasterRpcServices extends RSRpcServices
try {
long procId = master.modifyTable(
ProtobufUtil.toTableName(req.getTableName()),
ProtobufUtil.toTableDescriptor(req.getTableSchema()),
ProtobufUtil.convertToHTableDesc(req.getTableSchema()),
req.getNonceGroup(),
req.getNonce());
return ModifyTableResponse.newBuilder().setProcId(procId).build();
@ -1531,12 +1532,12 @@ public class MasterRpcServices extends RSRpcServices
throw new DoNotRetryIOException("Table " + tableName + " is not enabled");
}
boolean allFiles = false;
List<ColumnFamilyDescriptor> compactedColumns = new ArrayList<>();
ColumnFamilyDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
List<HColumnDescriptor> compactedColumns = new ArrayList<>();
HColumnDescriptor[] hcds = master.getTableDescriptors().get(tableName).getColumnFamilies();
byte[] family = null;
if (request.hasFamily()) {
family = request.getFamily().toByteArray();
for (ColumnFamilyDescriptor hcd : hcds) {
for (HColumnDescriptor hcd : hcds) {
if (Bytes.equals(family, hcd.getName())) {
if (!hcd.isMobEnabled()) {
LOG.error("Column family " + hcd.getNameAsString() + " is not a mob column family");
@ -1547,7 +1548,7 @@ public class MasterRpcServices extends RSRpcServices
}
}
} else {
for (ColumnFamilyDescriptor hcd : hcds) {
for (HColumnDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
compactedColumns.add(hcd);
}
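
The MasterRpcServices hunks above restore the older ProtobufUtil naming (convertToTableSchema, convertToHTableDesc, convertToHColumnDesc) in place of toTableSchema, toTableDescriptor and toColumnFamilyDescriptor. A small sketch of the round trip those helpers perform, assuming the shaded HBaseProtos.TableSchema message they operate on; the class name and the "demo" table are made up for illustration.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;

public class SchemaRoundTripSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    htd.addFamily(new HColumnDescriptor("cf"));
    // Serialize to the protobuf TableSchema used on the RPC boundary ...
    TableSchema schema = ProtobufUtil.convertToTableSchema(htd);
    // ... and back; the round trip is expected to preserve the schema.
    HTableDescriptor roundTripped = ProtobufUtil.convertToHTableDesc(schema);
    System.out.println(roundTripped.getTableName() + " " + roundTripped.getColumnFamilyCount());
  }
}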


@ -21,7 +21,9 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
@ -30,7 +32,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@ -158,17 +159,17 @@ public interface MasterServices extends Server {
* a single region is created.
*/
long createTable(
final TableDescriptor desc,
final HTableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException;
/**
* Create a system table using the given table definition.
* @param tableDescriptor The system table definition
* @param hTableDescriptor The system table definition
* a single region is created.
*/
long createSystemTable(final TableDescriptor tableDescriptor) throws IOException;
long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException;
/**
* Delete a table
@ -206,7 +207,7 @@ public interface MasterServices extends Server {
*/
long modifyTable(
final TableName tableName,
final TableDescriptor descriptor,
final HTableDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
@ -246,7 +247,7 @@ public interface MasterServices extends Server {
*/
long addColumn(
final TableName tableName,
final ColumnFamilyDescriptor column,
final HColumnDescriptor column,
final long nonceGroup,
final long nonce)
throws IOException;
@ -261,7 +262,7 @@ public interface MasterServices extends Server {
*/
long modifyColumn(
final TableName tableName,
final ColumnFamilyDescriptor descriptor,
final HColumnDescriptor descriptor,
final long nonceGroup,
final long nonce)
throws IOException;
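
The MasterServices interface above goes back to taking HTableDescriptor and HColumnDescriptor. As a rough illustration of what a caller of the reverted createTable signature looks like; the helper name, the "demo" table and the mob setting are hypothetical, and null split keys mean a single region is created.

import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterServices;

public class CreateTableSketch {
  // Submits a create-table procedure and returns its procedure id.
  static long createDemoTable(MasterServices master) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo"));
    htd.addFamily(new HColumnDescriptor("cf").setMobEnabled(true));
    return master.createTable(htd, null, HConstants.NO_NONCE, HConstants.NO_NONCE);
  }
}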


@ -24,11 +24,11 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
@ -55,8 +55,8 @@ public class MobCompactionChore extends ScheduledChore {
protected void chore() {
try {
TableDescriptors htds = master.getTableDescriptors();
Map<String, TableDescriptor> map = htds.getAll();
for (TableDescriptor htd : map.values()) {
Map<String, HTableDescriptor> map = htds.getAll();
for (HTableDescriptor htd : map.values()) {
if (!master.getTableStateManager().isTableState(htd.getTableName(),
TableState.State.ENABLED)) {
continue;
@ -66,7 +66,7 @@ public class MobCompactionChore extends ScheduledChore {
final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
this.getClass().getName() + ": mob compaction");
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
if (!hcd.isMobEnabled()) {
continue;
}


@ -24,12 +24,12 @@ import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import edu.umd.cs.findbugs.annotations.NonNull;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@ -198,7 +198,7 @@ public class TableStateManager {
public static void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
throws IOException {
final Map<String, TableDescriptor> allDescriptors =
final Map<String, HTableDescriptor> allDescriptors =
tableDescriptors.getAllDescriptors();
final Map<String, TableState> states = new HashMap<>();
MetaTableAccessor.fullScanTables(connection, new MetaTableAccessor.Visitor() {
@ -210,7 +210,7 @@ public class TableStateManager {
return true;
}
});
for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
for (Map.Entry<String, HTableDescriptor> entry : allDescriptors.entrySet()) {
String table = entry.getKey();
if (table.equals(TableName.META_TABLE_NAME.getNameAsString()))
continue;


@ -31,18 +31,18 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaMutationAnnotation;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.master.CatalogJanitor;
@ -603,10 +603,10 @@ public class MergeTableRegionsProcedure
throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Configuration conf = env.getMasterConfiguration();
final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
for (String family: regionFs.getFamilies()) {
final ColumnFamilyDescriptor hcd = htd.getColumnFamily(family.getBytes());
final HColumnDescriptor hcd = htd.getFamily(family.getBytes());
final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family);
if (storeFiles != null && storeFiles.size() > 0) {
@ -682,7 +682,7 @@ public class MergeTableRegionsProcedure
}
private int getRegionReplication(final MasterProcedureEnv env) throws IOException {
final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
final HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName());
return htd.getRegionReplication();
}


@ -30,6 +30,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.RegionState.State;
@ -221,7 +221,7 @@ public class RegionStateStore {
// ============================================================================================
public void splitRegion(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
final TableDescriptor htd = getTableDescriptor(parent.getTable());
final HTableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.splitRegion(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), hasSerialReplicationScope(htd));
}
@ -231,7 +231,7 @@ public class RegionStateStore {
// ============================================================================================
public void mergeRegions(final HRegionInfo parent, final HRegionInfo hriA,
final HRegionInfo hriB, final ServerName serverName) throws IOException {
final TableDescriptor htd = getTableDescriptor(parent.getTable());
final HTableDescriptor htd = getTableDescriptor(parent.getTable());
MetaTableAccessor.mergeRegions(master.getConnection(), parent, hriA, hriB, serverName,
getRegionReplication(htd), EnvironmentEdgeManager.currentTime(),
hasSerialReplicationScope(htd));
@ -255,15 +255,15 @@ public class RegionStateStore {
return hasSerialReplicationScope(getTableDescriptor(tableName));
}
private boolean hasSerialReplicationScope(final TableDescriptor htd) {
private boolean hasSerialReplicationScope(final HTableDescriptor htd) {
return (htd != null)? htd.hasSerialReplicationScope(): false;
}
private int getRegionReplication(final TableDescriptor htd) {
private int getRegionReplication(final HTableDescriptor htd) {
return (htd != null) ? htd.getRegionReplication() : 1;
}
private TableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
private HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException {
return master.getTableDescriptors().get(tableName);
}


@ -34,10 +34,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -194,7 +194,7 @@ class RegionLocationFinder {
*/
protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) {
try {
TableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable());
if (tableDescriptor != null) {
HDFSBlocksDistribution blocksDistribution =
HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
@ -209,14 +209,14 @@ class RegionLocationFinder {
}
/**
* return TableDescriptor for a given tableName
* return HTableDescriptor for a given tableName
*
* @param tableName the table name
* @return TableDescriptor
* @return HTableDescriptor
* @throws IOException
*/
protected TableDescriptor getTableDescriptor(TableName tableName) throws IOException {
TableDescriptor tableDescriptor = null;
protected HTableDescriptor getTableDescriptor(TableName tableName) throws IOException {
HTableDescriptor tableDescriptor = null;
try {
if (this.services != null && this.services.getTableDescriptors() != null) {
tableDescriptor = this.services.getTableDescriptors().get(tableName);


@ -27,17 +27,17 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.util.Bytes;
@ -63,11 +63,11 @@ public class ReplicationMetaCleaner extends ScheduledChore {
@Override
protected void chore() {
try {
Map<String, TableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
Map<String, HTableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
Map<String, Set<String>> serialTables = new HashMap<>();
for (Map.Entry<String, TableDescriptor> entry : tables.entrySet()) {
for (Map.Entry<String, HTableDescriptor> entry : tables.entrySet()) {
boolean hasSerialScope = false;
for (ColumnFamilyDescriptor column : entry.getValue().getColumnFamilies()) {
for (HColumnDescriptor column : entry.getValue().getFamilies()) {
if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
hasSerialScope = true;
break;
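
The ReplicationMetaCleaner hunk above reverts to scanning HColumnDescriptor families for the serial replication scope. A compact, self-contained sketch of that per-table check; the class and method names are invented for illustration, while the scope test mirrors the loop in the hunk.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;

public class SerialScopeSketch {
  // A table participates in serial replication if any of its families
  // carries REPLICATION_SCOPE_SERIAL.
  static boolean hasSerialScope(HTableDescriptor htd) {
    for (HColumnDescriptor column : htd.getFamilies()) {
      if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
        return true;
      }
    }
    return false;
  }
}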


@ -25,13 +25,12 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@ -46,30 +45,30 @@ public class AddColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(AddColumnFamilyProcedure.class);
private TableName tableName;
private TableDescriptor unmodifiedTableDescriptor;
private ColumnFamilyDescriptor cfDescriptor;
private HTableDescriptor unmodifiedHTableDescriptor;
private HColumnDescriptor cfDescriptor;
private List<HRegionInfo> regionInfoList;
private Boolean traceEnabled;
public AddColumnFamilyProcedure() {
super();
this.unmodifiedTableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
final ColumnFamilyDescriptor cfDescriptor) throws IOException {
final HColumnDescriptor cfDescriptor) throws IOException {
this(env, tableName, cfDescriptor, null);
}
public AddColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
this.unmodifiedTableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@ -173,10 +172,10 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
if (unmodifiedTableDescriptor != null) {
.setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
if (unmodifiedHTableDescriptor != null) {
addCFMsg
.setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
.setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
addCFMsg.build().writeDelimitedTo(stream);
@ -190,9 +189,9 @@ public class AddColumnFamilyProcedure
MasterProcedureProtos.AddColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(addCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(addCFMsg.getTableName());
cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(addCFMsg.getColumnfamilySchema());
cfDescriptor = ProtobufUtil.convertToHColumnDesc(addCFMsg.getColumnfamilySchema());
if (addCFMsg.hasUnmodifiedTableSchema()) {
unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(addCFMsg.getUnmodifiedTableSchema());
unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(addCFMsg.getUnmodifiedTableSchema());
}
}
@ -230,11 +229,11 @@ public class AddColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedTableDescriptor == null) {
throw new IOException("TableDescriptor missing for " + tableName);
unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedHTableDescriptor == null) {
throw new IOException("HTableDescriptor missing for " + tableName);
}
if (unmodifiedTableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
if (unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Column family '" + getColumnFamilyName()
+ "' in table '" + tableName + "' already exists so cannot be added");
}
@ -259,18 +258,17 @@ public class AddColumnFamilyProcedure
// Update table descriptor
LOG.info("AddColumn. Table = " + tableName + " HCD = " + cfDescriptor.toString());
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
if (htd.hasColumnFamily(cfDescriptor.getName())) {
if (htd.hasFamily(cfDescriptor.getName())) {
// It is possible to reach this situation, as we could already add the column family
// to table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problem.
return;
}
env.getMasterServices().getTableDescriptors().add(
TableDescriptorBuilder.newBuilder(htd)
.addColumnFamily(cfDescriptor).build());
htd.addFamily(cfDescriptor);
env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@ -279,14 +277,14 @@ public class AddColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
if (htd.hasColumnFamily(cfDescriptor.getName())) {
HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
if (htd.hasFamily(cfDescriptor.getName())) {
// Remove the column family from file system and update the table descriptor to
// the before-add-column-family-state
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(env, tableName,
getRegionInfoList(env), cfDescriptor.getName(), cfDescriptor.isMobEnabled());
env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);


@ -33,12 +33,11 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@ -68,7 +67,7 @@ public class CloneSnapshotProcedure
extends AbstractStateMachineTableProcedure<CloneSnapshotState> {
private static final Log LOG = LogFactory.getLog(CloneSnapshotProcedure.class);
private TableDescriptor tableDescriptor;
private HTableDescriptor hTableDescriptor;
private SnapshotDescription snapshot;
private boolean restoreAcl;
private List<HRegionInfo> newRegions = null;
@ -86,21 +85,21 @@ public class CloneSnapshotProcedure
}
public CloneSnapshotProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
this(env, tableDescriptor, snapshot, false);
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
* @param tableDescriptor the table to operate on
* @param hTableDescriptor the table to operate on
* @param snapshot snapshot to clone from
*/
public CloneSnapshotProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final SnapshotDescription snapshot,
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
this.tableDescriptor = tableDescriptor;
this.hTableDescriptor = hTableDescriptor;
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@ -122,7 +121,7 @@ public class CloneSnapshotProcedure
Configuration conf = env.getMasterServices().getConfiguration();
if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, tableDescriptor.getTableName(), conf);
RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, hTableDescriptor.getTableName(), conf);
}
}
@ -142,7 +141,7 @@ public class CloneSnapshotProcedure
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
break;
case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
newRegions = createFilesystemLayout(env, tableDescriptor, newRegions);
newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions);
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
break;
case CLONE_SNAPSHOT_ADD_TO_META:
@ -225,7 +224,7 @@ public class CloneSnapshotProcedure
@Override
public TableName getTableName() {
return tableDescriptor.getTableName();
return hTableDescriptor.getTableName();
}
@Override
@ -251,7 +250,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
cloneSnapshotMsg.addRegionInfo(HRegionInfo.convert(hri));
@ -282,7 +281,7 @@ public class CloneSnapshotProcedure
MasterProcedureProtos.CloneSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(cloneSnapshotMsg.getUserInfo()));
snapshot = cloneSnapshotMsg.getSnapshot();
tableDescriptor = ProtobufUtil.toTableDescriptor(cloneSnapshotMsg.getTableSchema());
hTableDescriptor = ProtobufUtil.convertToHTableDesc(cloneSnapshotMsg.getTableSchema());
if (cloneSnapshotMsg.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@ -342,7 +341,7 @@ public class CloneSnapshotProcedure
final MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
if (cpHost != null) {
cpHost.preCreateTableAction(tableDescriptor, null, getUser());
cpHost.preCreateTableAction(hTableDescriptor, null, getUser());
}
}
@ -358,7 +357,7 @@ public class CloneSnapshotProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@ -369,9 +368,9 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFilesystemLayout(
final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> newRegions) throws IOException {
return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(
final MasterProcedureEnv env,
@ -391,7 +390,7 @@ public class CloneSnapshotProcedure
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
conf, fs, manifest, tableDescriptor, tableRootDir, monitorException, monitorStatus);
conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
// Clone operation should not have stuff to restore or remove
@ -430,7 +429,7 @@ public class CloneSnapshotProcedure
*/
private List<HRegionInfo> createFsLayout(
final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
@ -438,17 +437,17 @@ public class CloneSnapshotProcedure
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(tempTableDir,
TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);
.createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(
env, tempdir, tableDescriptor.getTableName(), newRegions);
env, tempdir, hTableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
return newRegions;
}
@ -459,11 +458,11 @@ public class CloneSnapshotProcedure
* @throws IOException
*/
private void addRegionsToMeta(final MasterProcedureEnv env) throws IOException {
newRegions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, newRegions);
newRegions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, newRegions);
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
tableDescriptor, parentsToChildrenPairMap);
hTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(env.getMasterServices().getConnection(), newRegions);
}


@ -30,12 +30,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -55,7 +55,7 @@ public class CreateTableProcedure
extends AbstractStateMachineTableProcedure<CreateTableState> {
private static final Log LOG = LogFactory.getLog(CreateTableProcedure.class);
private TableDescriptor tableDescriptor;
private HTableDescriptor hTableDescriptor;
private List<HRegionInfo> newRegions;
public CreateTableProcedure() {
@ -64,15 +64,15 @@ public class CreateTableProcedure
}
public CreateTableProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions) {
this(env, tableDescriptor, newRegions, null);
final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions) {
this(env, hTableDescriptor, newRegions, null);
}
public CreateTableProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
final ProcedurePrepareLatch syncLatch) {
super(env, syncLatch);
this.tableDescriptor = tableDescriptor;
this.hTableDescriptor = hTableDescriptor;
this.newRegions = newRegions != null ? Lists.newArrayList(newRegions) : null;
}
@ -98,11 +98,11 @@ public class CreateTableProcedure
setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT);
break;
case CREATE_TABLE_WRITE_FS_LAYOUT:
newRegions = createFsLayout(env, tableDescriptor, newRegions);
newRegions = createFsLayout(env, hTableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
break;
case CREATE_TABLE_ADD_TO_META:
newRegions = addTableToMeta(env, tableDescriptor, newRegions);
newRegions = addTableToMeta(env, hTableDescriptor, newRegions);
setNextState(CreateTableState.CREATE_TABLE_ASSIGN_REGIONS);
break;
case CREATE_TABLE_ASSIGN_REGIONS:
@ -174,7 +174,7 @@ public class CreateTableProcedure
@Override
public TableName getTableName() {
return tableDescriptor.getTableName();
return hTableDescriptor.getTableName();
}
@Override
@ -189,7 +189,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData.Builder state =
MasterProcedureProtos.CreateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
if (newRegions != null) {
for (HRegionInfo hri: newRegions) {
state.addRegionInfo(HRegionInfo.convert(hri));
@ -205,7 +205,7 @@ public class CreateTableProcedure
MasterProcedureProtos.CreateTableStateData state =
MasterProcedureProtos.CreateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
if (state.getRegionInfoCount() == 0) {
newRegions = null;
} else {
@ -235,7 +235,7 @@ public class CreateTableProcedure
}
// check that we have at least 1 CF
if (tableDescriptor.getColumnFamilyCount() == 0) {
if (hTableDescriptor.getColumnFamilyCount() == 0) {
setFailure("master-create-table", new DoNotRetryIOException("Table " +
getTableName().toString() + " should have at least one column family."));
return false;
@ -256,7 +256,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = newRegions == null ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
cpHost.preCreateTableAction(tableDescriptor, regions, getUser());
cpHost.preCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@ -266,7 +266,7 @@ public class CreateTableProcedure
if (cpHost != null) {
final HRegionInfo[] regions = (newRegions == null) ? null :
newRegions.toArray(new HRegionInfo[newRegions.size()]);
cpHost.postCompletedCreateTableAction(tableDescriptor, regions, getUser());
cpHost.postCompletedCreateTableAction(hTableDescriptor, regions, getUser());
}
}
@ -277,9 +277,9 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final List<HRegionInfo> newRegions)
final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
throws IOException {
return createFsLayout(env, tableDescriptor, newRegions, new CreateHdfsRegions() {
return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
@Override
public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
final Path tableRootDir, final TableName tableName,
@ -287,40 +287,40 @@ public class CreateTableProcedure
HRegionInfo[] regions = newRegions != null ?
newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
tableRootDir, tableDescriptor, regions, null);
tableRootDir, hTableDescriptor, regions, null);
}
});
}
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, List<HRegionInfo> newRegions,
final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
final CreateHdfsRegions hdfsRegionHandler) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tempdir = mfs.getTempDir();
// 1. Create Table Descriptor
// using a copy of descriptor, table will be created enabling first
final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
.createTableDescriptorForTableDirectory(
tempTableDir, tableDescriptor, false);
tempTableDir, hTableDescriptor, false);
// 2. Create Regions
newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
tableDescriptor.getTableName(), newRegions);
hTableDescriptor.getTableName(), newRegions);
// 3. Move Table temp directory to the hbase root location
moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
return newRegions;
}
protected static void moveTempDirectoryToHBaseRoot(
final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final Path tempTableDir) throws IOException {
final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
final Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), hTableDescriptor.getTableName());
FileSystem fs = mfs.getFileSystem();
if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
throw new IOException("Couldn't delete " + tableDir);
@ -332,20 +332,20 @@ public class CreateTableProcedure
}
protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regions) throws IOException {
assert (regions != null && regions.size() > 0) : "expected at least 1 region, got " + regions;
ProcedureSyncWait.waitMetaRegions(env);
// Add replicas if needed
List<HRegionInfo> newRegions = addReplicas(env, tableDescriptor, regions);
List<HRegionInfo> newRegions = addReplicas(env, hTableDescriptor, regions);
// Add regions to META
addRegionsToMeta(env, tableDescriptor, newRegions);
addRegionsToMeta(env, hTableDescriptor, newRegions);
// Setup replication for region replicas if needed
if (tableDescriptor.getRegionReplication() > 1) {
if (hTableDescriptor.getRegionReplication() > 1) {
ServerRegionReplicaUtil.setupRegionReplicaReplication(env.getMasterConfiguration());
}
return newRegions;
@ -354,14 +354,14 @@ public class CreateTableProcedure
/**
* Create any replicas for the regions (the default replicas that was
* already created is passed to the method)
* @param tableDescriptor descriptor to use
* @param hTableDescriptor descriptor to use
* @param regions default replicas
* @return the combined list of default and non-default replicas
*/
private static List<HRegionInfo> addReplicas(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regions) {
int numRegionReplicas = tableDescriptor.getRegionReplication() - 1;
int numRegionReplicas = hTableDescriptor.getRegionReplication() - 1;
if (numRegionReplicas <= 0) {
return regions;
}
@ -394,10 +394,10 @@ public class CreateTableProcedure
* Add the specified set of regions to the hbase:meta table.
*/
private static void addRegionsToMeta(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final List<HRegionInfo> regionInfos) throws IOException {
MetaTableAccessor.addRegionsToMeta(env.getMasterServices().getConnection(),
regionInfos, tableDescriptor.getRegionReplication());
regionInfos, hTableDescriptor.getRegionReplication());
}
protected static void updateTableDescCache(final MasterProcedureEnv env,
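
For the CreateTableProcedure hunks above, addReplicas only creates the non-default replicas, since the default regions are passed in and returned as part of the combined list. A tiny, hypothetical illustration of the resulting counts; plain arithmetic, no HBase classes involved.

public class ReplicaCountSketch {
  // Size of the combined list returned by addReplicas(): the default regions plus
  // (regionReplication - 1) replicas for each of them.
  static int totalRegionEntries(int defaultRegions, int regionReplication) {
    int extraPerRegion = Math.max(regionReplication - 1, 0);
    return defaultRegions + defaultRegions * extraPerRegion;
  }

  public static void main(String[] args) {
    // Two initial regions with REGION_REPLICATION = 3 => 2 + 2 * 2 = 6 entries.
    System.out.println(totalRegionEntries(2, 3));
  }
}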


@ -26,11 +26,10 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -46,7 +45,7 @@ public class DeleteColumnFamilyProcedure
extends AbstractStateMachineTableProcedure<DeleteColumnFamilyState> {
private static final Log LOG = LogFactory.getLog(DeleteColumnFamilyProcedure.class);
private TableDescriptor unmodifiedTableDescriptor;
private HTableDescriptor unmodifiedHTableDescriptor;
private TableName tableName;
private byte [] familyName;
private boolean hasMob;
@ -56,7 +55,7 @@ public class DeleteColumnFamilyProcedure
public DeleteColumnFamilyProcedure() {
super();
this.unmodifiedTableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@ -71,7 +70,7 @@ public class DeleteColumnFamilyProcedure
super(env, latch);
this.tableName = tableName;
this.familyName = familyName;
this.unmodifiedTableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
}
@ -180,9 +179,9 @@ public class DeleteColumnFamilyProcedure
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setColumnfamilyName(UnsafeByteOperations.unsafeWrap(familyName));
if (unmodifiedTableDescriptor != null) {
if (unmodifiedHTableDescriptor != null) {
deleteCFMsg
.setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
.setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
deleteCFMsg.build().writeDelimitedTo(stream);
@ -198,7 +197,7 @@ public class DeleteColumnFamilyProcedure
familyName = deleteCFMsg.getColumnfamilyName().toByteArray();
if (deleteCFMsg.hasUnmodifiedTableSchema()) {
unmodifiedTableDescriptor = ProtobufUtil.toTableDescriptor(deleteCFMsg.getUnmodifiedTableSchema());
unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(deleteCFMsg.getUnmodifiedTableSchema());
}
}
@ -236,22 +235,22 @@ public class DeleteColumnFamilyProcedure
checkTableModifiable(env);
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
unmodifiedTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedTableDescriptor == null) {
throw new IOException("TableDescriptor missing for " + tableName);
unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedHTableDescriptor == null) {
throw new IOException("HTableDescriptor missing for " + tableName);
}
if (!unmodifiedTableDescriptor.hasColumnFamily(familyName)) {
if (!unmodifiedHTableDescriptor.hasFamily(familyName)) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be deleted");
}
if (unmodifiedTableDescriptor.getColumnFamilyCount() == 1) {
if (unmodifiedHTableDescriptor.getColumnFamilyCount() == 1) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' is the only column family in the table, so it cannot be deleted");
}
// whether mob family
hasMob = unmodifiedTableDescriptor.getColumnFamily(familyName).isMobEnabled();
hasMob = unmodifiedHTableDescriptor.getFamily(familyName).isMobEnabled();
}
/**
@ -273,17 +272,17 @@ public class DeleteColumnFamilyProcedure
// Update table descriptor
LOG.info("DeleteColumn. Table = " + tableName + " family = " + getColumnFamilyName());
TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
if (!htd.hasColumnFamily(familyName)) {
if (!htd.hasFamily(familyName)) {
// It is possible to reach this situation, as we could already delete the column family
// from table descriptor, but the master failover happens before we complete this state.
// We should be able to handle running this function multiple times without causing problem.
return;
}
env.getMasterServices().getTableDescriptors().add(
TableDescriptorBuilder.newBuilder(htd).removeColumnFamily(familyName).build());
htd.removeFamily(familyName);
env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@ -292,7 +291,7 @@ public class DeleteColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);


@ -24,12 +24,11 @@ import java.io.OutputStream;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@ -44,28 +43,28 @@ public class ModifyColumnFamilyProcedure
private static final Log LOG = LogFactory.getLog(ModifyColumnFamilyProcedure.class);
private TableName tableName;
private TableDescriptor unmodifiedtableDescriptor;
private ColumnFamilyDescriptor cfDescriptor;
private HTableDescriptor unmodifiedHTableDescriptor;
private HColumnDescriptor cfDescriptor;
private Boolean traceEnabled;
public ModifyColumnFamilyProcedure() {
super();
this.unmodifiedtableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.traceEnabled = null;
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
final ColumnFamilyDescriptor cfDescriptor) {
final HColumnDescriptor cfDescriptor) {
this(env, tableName, cfDescriptor, null);
}
public ModifyColumnFamilyProcedure(final MasterProcedureEnv env, final TableName tableName,
final ColumnFamilyDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
final HColumnDescriptor cfDescriptor, final ProcedurePrepareLatch latch) {
super(env, latch);
this.tableName = tableName;
this.cfDescriptor = cfDescriptor;
this.unmodifiedtableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.traceEnabled = null;
}
@ -166,10 +165,10 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setTableName(ProtobufUtil.toProtoTableName(tableName))
.setColumnfamilySchema(ProtobufUtil.toColumnFamilySchema(cfDescriptor));
if (unmodifiedtableDescriptor != null) {
.setColumnfamilySchema(ProtobufUtil.convertToColumnFamilySchema(cfDescriptor));
if (unmodifiedHTableDescriptor != null) {
modifyCFMsg
.setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedtableDescriptor));
.setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
modifyCFMsg.build().writeDelimitedTo(stream);
@ -183,9 +182,9 @@ public class ModifyColumnFamilyProcedure
MasterProcedureProtos.ModifyColumnFamilyStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyCFMsg.getUserInfo()));
tableName = ProtobufUtil.toTableName(modifyCFMsg.getTableName());
cfDescriptor = ProtobufUtil.toColumnFamilyDescriptor(modifyCFMsg.getColumnfamilySchema());
cfDescriptor = ProtobufUtil.convertToHColumnDesc(modifyCFMsg.getColumnfamilySchema());
if (modifyCFMsg.hasUnmodifiedTableSchema()) {
unmodifiedtableDescriptor = ProtobufUtil.toTableDescriptor(modifyCFMsg.getUnmodifiedTableSchema());
unmodifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyCFMsg.getUnmodifiedTableSchema());
}
}
@ -222,11 +221,11 @@ public class ModifyColumnFamilyProcedure
// Checks whether the table is allowed to be modified.
checkTableModifiable(env);
unmodifiedtableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedtableDescriptor == null) {
throw new IOException("TableDescriptor missing for " + tableName);
unmodifiedHTableDescriptor = env.getMasterServices().getTableDescriptors().get(tableName);
if (unmodifiedHTableDescriptor == null) {
throw new IOException("HTableDescriptor missing for " + tableName);
}
if (!unmodifiedtableDescriptor.hasColumnFamily(cfDescriptor.getName())) {
if (!unmodifiedHTableDescriptor.hasFamily(cfDescriptor.getName())) {
throw new InvalidFamilyOperationException("Family '" + getColumnFamilyName()
+ "' does not exist, so it cannot be modified");
}
@ -251,9 +250,9 @@ public class ModifyColumnFamilyProcedure
// Update table descriptor
LOG.info("ModifyColumnFamily. Table = " + tableName + " HCD = " + cfDescriptor.toString());
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(env.getMasterServices().getTableDescriptors().get(tableName));
builder.modifyColumnFamily(cfDescriptor);
env.getMasterServices().getTableDescriptors().add(builder.build());
HTableDescriptor htd = env.getMasterServices().getTableDescriptors().get(tableName);
htd.modifyFamily(cfDescriptor);
env.getMasterServices().getTableDescriptors().add(htd);
}
/**
@ -262,7 +261,7 @@ public class ModifyColumnFamilyProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().add(unmodifiedtableDescriptor);
env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);


@ -30,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -52,8 +52,8 @@ public class ModifyTableProcedure
extends AbstractStateMachineTableProcedure<ModifyTableState> {
private static final Log LOG = LogFactory.getLog(ModifyTableProcedure.class);
private TableDescriptor unmodifiedTableDescriptor = null;
private TableDescriptor modifiedTableDescriptor;
private HTableDescriptor unmodifiedHTableDescriptor = null;
private HTableDescriptor modifiedHTableDescriptor;
private boolean deleteColumnFamilyInModify;
private List<HRegionInfo> regionInfoList;
@ -64,19 +64,19 @@ public class ModifyTableProcedure
initilize();
}
public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd) {
public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd) {
this(env, htd, null);
}
public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd,
public ModifyTableProcedure(final MasterProcedureEnv env, final HTableDescriptor htd,
final ProcedurePrepareLatch latch) {
super(env, latch);
initilize();
this.modifiedTableDescriptor = htd;
this.modifiedHTableDescriptor = htd;
}
private void initilize() {
this.unmodifiedTableDescriptor = null;
this.unmodifiedHTableDescriptor = null;
this.regionInfoList = null;
this.traceEnabled = null;
this.deleteColumnFamilyInModify = false;
@ -104,7 +104,7 @@ public class ModifyTableProcedure
setNextState(ModifyTableState.MODIFY_TABLE_REMOVE_REPLICA_COLUMN);
break;
case MODIFY_TABLE_REMOVE_REPLICA_COLUMN:
updateReplicaColumnsIfNeeded(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
updateReplicaColumnsIfNeeded(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
if (deleteColumnFamilyInModify) {
setNextState(ModifyTableState.MODIFY_TABLE_DELETE_FS_LAYOUT);
} else {
@ -112,7 +112,7 @@ public class ModifyTableProcedure
}
break;
case MODIFY_TABLE_DELETE_FS_LAYOUT:
deleteFromFs(env, unmodifiedTableDescriptor, modifiedTableDescriptor);
deleteFromFs(env, unmodifiedHTableDescriptor, modifiedHTableDescriptor);
setNextState(ModifyTableState.MODIFY_TABLE_POST_OPERATION);
break;
case MODIFY_TABLE_POST_OPERATION:
@ -191,12 +191,12 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData.Builder modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor))
.setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor))
.setDeleteColumnFamilyInModify(deleteColumnFamilyInModify);
if (unmodifiedTableDescriptor != null) {
if (unmodifiedHTableDescriptor != null) {
modifyTableMsg
.setUnmodifiedTableSchema(ProtobufUtil.toTableSchema(unmodifiedTableDescriptor));
.setUnmodifiedTableSchema(ProtobufUtil.convertToTableSchema(unmodifiedHTableDescriptor));
}
modifyTableMsg.build().writeDelimitedTo(stream);
@ -209,18 +209,18 @@ public class ModifyTableProcedure
MasterProcedureProtos.ModifyTableStateData modifyTableMsg =
MasterProcedureProtos.ModifyTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(modifyTableMsg.getUserInfo()));
modifiedTableDescriptor = ProtobufUtil.toTableDescriptor(modifyTableMsg.getModifiedTableSchema());
modifiedHTableDescriptor = ProtobufUtil.convertToHTableDesc(modifyTableMsg.getModifiedTableSchema());
deleteColumnFamilyInModify = modifyTableMsg.getDeleteColumnFamilyInModify();
if (modifyTableMsg.hasUnmodifiedTableSchema()) {
unmodifiedTableDescriptor =
ProtobufUtil.toTableDescriptor(modifyTableMsg.getUnmodifiedTableSchema());
unmodifiedHTableDescriptor =
ProtobufUtil.convertToHTableDesc(modifyTableMsg.getUnmodifiedTableSchema());
}
}
@Override
public TableName getTableName() {
return modifiedTableDescriptor.getTableName();
return modifiedHTableDescriptor.getTableName();
}
@Override
@ -240,27 +240,27 @@ public class ModifyTableProcedure
}
// check that we have at least 1 CF
if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
// In order to update the descriptor, we need to retrieve the old descriptor for comparison.
this.unmodifiedTableDescriptor =
this.unmodifiedHTableDescriptor =
env.getMasterServices().getTableDescriptors().get(getTableName());
if (env.getMasterServices().getTableStateManager()
.isTableState(getTableName(), TableState.State.ENABLED)) {
if (modifiedTableDescriptor.getRegionReplication() != unmodifiedTableDescriptor
if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
.getRegionReplication()) {
throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
}
}
// Find out whether all column families in unmodifiedTableDescriptor also exists in
// the modifiedTableDescriptor. This is to determine whether we are safe to rollback.
final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames();
final Set<byte[]> newFamilies = modifiedTableDescriptor.getColumnFamilyNames();
// Find out whether all column families in unmodifiedHTableDescriptor also exists in
// the modifiedHTableDescriptor. This is to determine whether we are safe to rollback.
final Set<byte[]> oldFamilies = unmodifiedHTableDescriptor.getFamiliesKeys();
final Set<byte[]> newFamilies = modifiedHTableDescriptor.getFamiliesKeys();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
this.deleteColumnFamilyInModify = true;
@ -287,7 +287,7 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
}
/**
@ -296,10 +296,10 @@ public class ModifyTableProcedure
* @throws IOException
**/
private void restoreTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().add(unmodifiedTableDescriptor);
env.getMasterServices().getTableDescriptors().add(unmodifiedHTableDescriptor);
// delete any new column families from the modifiedTableDescriptor.
deleteFromFs(env, modifiedTableDescriptor, unmodifiedTableDescriptor);
// delete any new column families from the modifiedHTableDescriptor.
deleteFromFs(env, modifiedHTableDescriptor, unmodifiedHTableDescriptor);
// Make sure regions are opened after table descriptor is updated.
//reOpenAllRegionsIfTableIsOnline(env);
@ -312,17 +312,18 @@ public class ModifyTableProcedure
* @throws IOException
*/
private void deleteFromFs(final MasterProcedureEnv env,
final TableDescriptor oldTableDescriptor, final TableDescriptor newTableDescriptor)
final HTableDescriptor oldHTableDescriptor, final HTableDescriptor newHTableDescriptor)
throws IOException {
final Set<byte[]> oldFamilies = oldTableDescriptor.getColumnFamilyNames();
final Set<byte[]> newFamilies = newTableDescriptor.getColumnFamilyNames();
final Set<byte[]> oldFamilies = oldHTableDescriptor.getFamiliesKeys();
final Set<byte[]> newFamilies = newHTableDescriptor.getFamiliesKeys();
for (byte[] familyName : oldFamilies) {
if (!newFamilies.contains(familyName)) {
MasterDDLOperationHelper.deleteColumnFamilyFromFileSystem(
env,
getTableName(),
getRegionInfoList(env),
familyName, oldTableDescriptor.getColumnFamily(familyName).isMobEnabled());
familyName,
oldHTableDescriptor.getFamily(familyName).isMobEnabled());
}
}
}
@ -334,10 +335,10 @@ public class ModifyTableProcedure
*/
private void updateReplicaColumnsIfNeeded(
final MasterProcedureEnv env,
final TableDescriptor oldTableDescriptor,
final TableDescriptor newTableDescriptor) throws IOException {
final int oldReplicaCount = oldTableDescriptor.getRegionReplication();
final int newReplicaCount = newTableDescriptor.getRegionReplication();
final HTableDescriptor oldHTableDescriptor,
final HTableDescriptor newHTableDescriptor) throws IOException {
final int oldReplicaCount = oldHTableDescriptor.getRegionReplication();
final int newReplicaCount = newHTableDescriptor.getRegionReplication();
if (newReplicaCount < oldReplicaCount) {
Set<byte[]> tableRows = new HashSet<>();
@ -401,10 +402,10 @@ public class ModifyTableProcedure
if (cpHost != null) {
switch (state) {
case MODIFY_TABLE_PRE_OPERATION:
cpHost.preModifyTableAction(getTableName(), modifiedTableDescriptor, getUser());
cpHost.preModifyTableAction(getTableName(), modifiedHTableDescriptor, getUser());
break;
case MODIFY_TABLE_POST_OPERATION:
cpHost.postCompletedModifyTableAction(getTableName(), modifiedTableDescriptor,getUser());
cpHost.postCompletedModifyTableAction(getTableName(), modifiedHTableDescriptor,getUser());
break;
default:
throw new UnsupportedOperationException(this + " unhandled state=" + state);
View File
@ -33,12 +33,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -61,7 +61,7 @@ public class RestoreSnapshotProcedure
extends AbstractStateMachineTableProcedure<RestoreSnapshotState> {
private static final Log LOG = LogFactory.getLog(RestoreSnapshotProcedure.class);
private TableDescriptor modifiedTableDescriptor;
private HTableDescriptor modifiedHTableDescriptor;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
@ -82,24 +82,24 @@ public class RestoreSnapshotProcedure
}
public RestoreSnapshotProcedure(final MasterProcedureEnv env,
final TableDescriptor tableDescriptor, final SnapshotDescription snapshot) {
this(env, tableDescriptor, snapshot, false);
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
* @param tableDescriptor the table to operate on
* @param hTableDescriptor the table to operate on
* @param snapshot snapshot to restore from
* @throws IOException
*/
public RestoreSnapshotProcedure(
final MasterProcedureEnv env,
final TableDescriptor tableDescriptor,
final HTableDescriptor hTableDescriptor,
final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
// This is the new schema we are going to write out as this modification.
this.modifiedTableDescriptor = tableDescriptor;
this.modifiedHTableDescriptor = hTableDescriptor;
// Snapshot information
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
@ -204,7 +204,7 @@ public class RestoreSnapshotProcedure
@Override
public TableName getTableName() {
return modifiedTableDescriptor.getTableName();
return modifiedHTableDescriptor.getTableName();
}
@Override
@ -236,7 +236,7 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setSnapshot(this.snapshot)
.setModifiedTableSchema(ProtobufUtil.toTableSchema(modifiedTableDescriptor));
.setModifiedTableSchema(ProtobufUtil.convertToTableSchema(modifiedHTableDescriptor));
if (regionsToRestore != null) {
for (HRegionInfo hri: regionsToRestore) {
@ -278,8 +278,8 @@ public class RestoreSnapshotProcedure
MasterProcedureProtos.RestoreSnapshotStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(restoreSnapshotMsg.getUserInfo()));
snapshot = restoreSnapshotMsg.getSnapshot();
modifiedTableDescriptor =
ProtobufUtil.toTableDescriptor(restoreSnapshotMsg.getModifiedTableSchema());
modifiedHTableDescriptor =
ProtobufUtil.convertToHTableDesc(restoreSnapshotMsg.getModifiedTableSchema());
if (restoreSnapshotMsg.getRegionInfoForRestoreCount() == 0) {
regionsToRestore = null;
@ -333,7 +333,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().checkTableModifiable(tableName);
// Check that we have at least 1 CF
if (modifiedTableDescriptor.getColumnFamilyCount() == 0) {
if (modifiedHTableDescriptor.getColumnFamilyCount() == 0) {
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
}
@ -363,7 +363,7 @@ public class RestoreSnapshotProcedure
* @throws IOException
**/
private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
env.getMasterServices().getTableDescriptors().add(modifiedHTableDescriptor);
}
/**
@ -386,7 +386,7 @@ public class RestoreSnapshotProcedure
env.getMasterServices().getConfiguration(),
fs,
manifest,
modifiedTableDescriptor,
modifiedHTableDescriptor,
rootDir,
monitorException,
getMonitorStatus());
@ -440,19 +440,19 @@ public class RestoreSnapshotProcedure
MetaTableAccessor.addRegionsToMeta(
conn,
regionsToAdd,
modifiedTableDescriptor.getRegionReplication());
modifiedHTableDescriptor.getRegionReplication());
}
if (regionsToRestore != null) {
MetaTableAccessor.overwriteRegions(
conn,
regionsToRestore,
modifiedTableDescriptor.getRegionReplication());
modifiedHTableDescriptor.getRegionReplication());
}
RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
new RestoreSnapshotHelper.RestoreMetaChanges(
modifiedTableDescriptor, parentsToChildrenPairMap);
modifiedHTableDescriptor, parentsToChildrenPairMap);
metaChanges.updateMetaParentRegions(conn, regionsToAdd);
// At this point the restore is complete.
View File
@ -28,11 +28,11 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotDisabledException;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.HBaseException;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -48,7 +48,7 @@ public class TruncateTableProcedure
private boolean preserveSplits;
private List<HRegionInfo> regions;
private TableDescriptor tableDescriptor;
private HTableDescriptor hTableDescriptor;
private TableName tableName;
public TruncateTableProcedure() {
@ -95,7 +95,7 @@ public class TruncateTableProcedure
setNextState(TruncateTableState.TRUNCATE_TABLE_REMOVE_FROM_META);
break;
case TRUNCATE_TABLE_REMOVE_FROM_META:
tableDescriptor = env.getMasterServices().getTableDescriptors()
hTableDescriptor = env.getMasterServices().getTableDescriptors()
.get(tableName);
DeleteTableProcedure.deleteFromMeta(env, getTableName(), regions);
DeleteTableProcedure.deleteAssignmentState(env, getTableName());
@ -105,26 +105,26 @@ public class TruncateTableProcedure
DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
if (!preserveSplits) {
// if we are not preserving splits, generate a new single region
regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(tableDescriptor, null));
regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
} else {
regions = recreateRegionInfo(regions);
}
setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
break;
case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
regions = CreateTableProcedure.createFsLayout(env, tableDescriptor, regions);
regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
CreateTableProcedure.updateTableDescCache(env, getTableName());
setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
break;
case TRUNCATE_TABLE_ADD_TO_META:
regions = CreateTableProcedure.addTableToMeta(env, tableDescriptor, regions);
regions = CreateTableProcedure.addTableToMeta(env, hTableDescriptor, regions);
setNextState(TruncateTableState.TRUNCATE_TABLE_ASSIGN_REGIONS);
break;
case TRUNCATE_TABLE_ASSIGN_REGIONS:
CreateTableProcedure.setEnablingState(env, getTableName());
addChildProcedure(env.getAssignmentManager().createAssignProcedures(regions));
setNextState(TruncateTableState.TRUNCATE_TABLE_POST_OPERATION);
tableDescriptor = null;
hTableDescriptor = null;
regions = null;
break;
case TRUNCATE_TABLE_POST_OPERATION:
@ -216,8 +216,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.newBuilder()
.setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
.setPreserveSplits(preserveSplits);
if (tableDescriptor != null) {
state.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor));
if (hTableDescriptor != null) {
state.setTableSchema(ProtobufUtil.convertToTableSchema(hTableDescriptor));
} else {
state.setTableName(ProtobufUtil.toProtoTableName(tableName));
}
@ -237,8 +237,8 @@ public class TruncateTableProcedure
MasterProcedureProtos.TruncateTableStateData.parseDelimitedFrom(stream);
setUser(MasterProcedureUtil.toUserInfo(state.getUserInfo()));
if (state.hasTableSchema()) {
tableDescriptor = ProtobufUtil.toTableDescriptor(state.getTableSchema());
tableName = tableDescriptor.getTableName();
hTableDescriptor = ProtobufUtil.convertToHTableDesc(state.getTableSchema());
tableName = hTableDescriptor.getTableName();
} else {
tableName = ProtobufUtil.toTableName(state.getTableName());
}
View File
@ -30,9 +30,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@ -137,16 +137,16 @@ public final class MasterSnapshotVerifier {
* @param manifest snapshot manifest to inspect
*/
private void verifyTableInfo(final SnapshotManifest manifest) throws IOException {
TableDescriptor htd = manifest.getTableDescriptor();
HTableDescriptor htd = manifest.getTableDescriptor();
if (htd == null) {
throw new CorruptedSnapshotException("Missing Table Descriptor",
ProtobufUtil.createSnapshotDesc(snapshot));
}
if (!htd.getTableName().getNameAsString().equals(snapshot.getTable())) {
if (!htd.getNameAsString().equals(snapshot.getTable())) {
throw new CorruptedSnapshotException(
"Invalid Table Descriptor. Expected " + snapshot.getTable() + " name, got "
+ htd.getTableName().getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
+ htd.getNameAsString(), ProtobufUtil.createSnapshotDesc(snapshot));
}
}
View File
@ -38,13 +38,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.executor.ExecutorService;
@ -556,7 +555,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
cleanupSentinels();
// check to see if the table exists
TableDescriptor desc = null;
HTableDescriptor desc = null;
try {
desc = master.getTableDescriptors().get(
TableName.valueOf(snapshot.getTable()));
@ -680,10 +679,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
TableDescriptor htd = TableDescriptorBuilder.copy(tableName, snapshotTableDesc);
HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
if (cpHost != null) {
cpHost.preCloneSnapshot(reqSnapshot, htd);
}
@ -708,14 +707,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* The operation will fail if the destination table has a snapshot or restore in progress.
*
* @param snapshot Snapshot Descriptor
* @param tableDescriptor Table Descriptor of the table to create
* @param hTableDescriptor Table Descriptor of the table to create
* @param nonceKey unique identifier to prevent duplicated RPC
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
TableName tableName = tableDescriptor.getTableName();
TableName tableName = hTableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@ -730,7 +729,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
tableDescriptor, snapshot, restoreAcl),
hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
@ -766,7 +765,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
SnapshotDescription snapshot = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotManifest manifest = SnapshotManifest.open(master.getConfiguration(), fs,
snapshotDir, snapshot);
TableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
HTableDescriptor snapshotTableDesc = manifest.getTableDescriptor();
TableName tableName = TableName.valueOf(reqSnapshot.getTable());
// stop tracking "abandoned" handlers
@ -800,7 +799,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @throws IOException
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final TableDescriptor snapshotTableDesc,
final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@ -837,15 +836,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
* @param snapshot Snapshot Descriptor
* @param tableDescriptor Table Descriptor
* @param hTableDescriptor Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
*/
private synchronized long restoreSnapshot(final SnapshotDescription snapshot,
final TableDescriptor tableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
final TableName tableName = tableDescriptor.getTableName();
final TableName tableName = hTableDescriptor.getTableName();
// make sure we aren't running a snapshot on the same table
if (isTakingSnapshot(tableName)) {
@ -860,7 +859,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
tableDescriptor, snapshot, restoreAcl),
hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
View File
@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
@ -89,7 +89,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
protected final SnapshotManifest snapshotManifest;
protected final SnapshotManager snapshotManager;
protected TableDescriptor htd;
protected HTableDescriptor htd;
/**
* @param snapshot descriptor of the snapshot to take
@ -124,12 +124,12 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
"Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
}
private TableDescriptor loadTableDescriptor()
private HTableDescriptor loadTableDescriptor()
throws FileNotFoundException, IOException {
TableDescriptor htd =
HTableDescriptor htd =
this.master.getTableDescriptors().get(snapshotTable);
if (htd == null) {
throw new IOException("TableDescriptor missing for " + snapshotTable);
throw new IOException("HTableDescriptor missing for " + snapshotTable);
}
return htd;
}
View File
@ -26,15 +26,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@ -59,7 +59,7 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
* @param tableName The current table name.
* @param family The current family.
*/
public void cleanExpiredMobFiles(String tableName, ColumnFamilyDescriptor family) throws IOException {
public void cleanExpiredMobFiles(String tableName, HColumnDescriptor family) throws IOException {
Configuration conf = getConf();
TableName tn = TableName.valueOf(tableName);
FileSystem fs = FileSystem.get(conf);
@ -98,8 +98,8 @@ public class ExpiredMobFileCleaner extends Configured implements Tool {
Connection connection = ConnectionFactory.createConnection(getConf());
Admin admin = connection.getAdmin();
try {
TableDescriptor htd = admin.listTableDescriptor(tn);
ColumnFamilyDescriptor family = htd.getColumnFamily(Bytes.toBytes(familyName));
HTableDescriptor htd = admin.getTableDescriptor(tn);
HColumnDescriptor family = htd.getFamily(Bytes.toBytes(familyName));
if (family == null || !family.isMobEnabled()) {
throw new IOException("Column family " + familyName + " is not a MOB column family");
}
View File
@ -44,8 +44,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
@ -55,7 +57,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.crypto.Encryption;
@ -285,7 +286,7 @@ public final class MobUtils {
* @throws IOException
*/
public static void cleanExpiredMobFiles(FileSystem fs, Configuration conf, TableName tableName,
ColumnFamilyDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
HColumnDescriptor columnDescriptor, CacheConfig cacheConfig, long current)
throws IOException {
long timeToLive = columnDescriptor.getTimeToLive();
if (Integer.MAX_VALUE == timeToLive) {
@ -518,7 +519,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, String startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@ -542,7 +543,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createRefFileWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
HColumnDescriptor family, Path basePath, long maxKeyCount, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
return createWriter(conf, fs, family,
@ -569,7 +570,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext, boolean isCompaction)
throws IOException {
@ -595,7 +596,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createDelFileWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, String date, Path basePath, long maxKeyCount,
HColumnDescriptor family, String date, Path basePath, long maxKeyCount,
Compression.Algorithm compression, byte[] startKey, CacheConfig cacheConfig,
Encryption.Context cryptoContext)
throws IOException {
@ -622,7 +623,7 @@ public final class MobUtils {
* @throws IOException
*/
public static StoreFileWriter createWriter(Configuration conf, FileSystem fs,
ColumnFamilyDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
HColumnDescriptor family, MobFileName mobFileName, Path basePath, long maxKeyCount,
Compression.Algorithm compression, CacheConfig cacheConfig, Encryption.Context cryptoContext,
boolean isCompaction)
throws IOException {
@ -796,7 +797,7 @@ public final class MobUtils {
* @param allFiles Whether add all mob files into the compaction.
*/
public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
ColumnFamilyDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
throws IOException {
String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
PartitionedMobCompactor.class.getName());
@ -804,7 +805,7 @@ public final class MobUtils {
MobCompactor compactor = null;
try {
compactor = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
Configuration.class, FileSystem.class, TableName.class, ColumnFamilyDescriptor.class,
Configuration.class, FileSystem.class, TableName.class, HColumnDescriptor.class,
ExecutorService.class }, new Object[] { conf, fs, tableName, hcd, pool });
} catch (Exception e) {
throw new IOException("Unable to load configured mob file compactor '" + className + "'", e);
@ -856,9 +857,9 @@ public final class MobUtils {
* @param htd The current table descriptor.
* @return Whether this table has mob-enabled columns.
*/
public static boolean hasMobColumns(TableDescriptor htd) {
ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
for (ColumnFamilyDescriptor hcd : hcds) {
public static boolean hasMobColumns(HTableDescriptor htd) {
HColumnDescriptor[] hcds = htd.getColumnFamilies();
for (HColumnDescriptor hcd : hcds) {
if (hcd.isMobEnabled()) {
return true;
}
@ -898,7 +899,7 @@ public final class MobUtils {
* @param fileDate The date string parsed from the mob file name.
* @return True if the mob file is expired.
*/
public static boolean isMobFileExpired(ColumnFamilyDescriptor column, long current, String fileDate) {
public static boolean isMobFileExpired(HColumnDescriptor column, long current, String fileDate) {
if (column.getMinVersions() > 0) {
return false;
}
View File
@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.FSUtils;
@ -42,14 +42,14 @@ public abstract class MobCompactor {
protected FileSystem fs;
protected Configuration conf;
protected TableName tableName;
protected ColumnFamilyDescriptor column;
protected HColumnDescriptor column;
protected Path mobTableDir;
protected Path mobFamilyDir;
protected ExecutorService pool;
public MobCompactor(Configuration conf, FileSystem fs, TableName tableName,
ColumnFamilyDescriptor column, ExecutorService pool) {
HColumnDescriptor column, ExecutorService pool) {
this.conf = conf;
this.fs = fs;
this.tableName = tableName;
View File
@ -45,13 +45,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ArrayBackedTag;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
import org.apache.hadoop.hbase.TagType;
import org.apache.hadoop.hbase.TagUtil;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
@ -109,7 +109,7 @@ public class PartitionedMobCompactor extends MobCompactor {
private Encryption.Context cryptoContext = Encryption.Context.NONE;
public PartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
ColumnFamilyDescriptor column, ExecutorService pool) throws IOException {
HColumnDescriptor column, ExecutorService pool) throws IOException {
super(conf, fs, tableName, column, pool);
mergeableSize = conf.getLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD,
MobConstants.DEFAULT_MOB_COMPACTION_MERGEABLE_THRESHOLD);
View File
@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.mapreduce.JobUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@ -109,13 +109,13 @@ public class CompactionTool extends Configured implements Tool {
if (isFamilyDir(fs, path)) {
Path regionDir = path.getParent();
Path tableDir = regionDir.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
compactStoreFiles(tableDir, htd, hri,
path.getName(), compactOnce, major);
} else if (isRegionDir(fs, path)) {
Path tableDir = path.getParent();
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
compactRegion(tableDir, htd, path, compactOnce, major);
} else if (isTableDir(fs, path)) {
compactTable(path, compactOnce, major);
@ -127,13 +127,13 @@ public class CompactionTool extends Configured implements Tool {
private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
throws IOException {
TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
compactRegion(tableDir, htd, regionDir, compactOnce, major);
}
}
private void compactRegion(final Path tableDir, final TableDescriptor htd,
private void compactRegion(final Path tableDir, final HTableDescriptor htd,
final Path regionDir, final boolean compactOnce, final boolean major)
throws IOException {
HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
@ -147,7 +147,7 @@ public class CompactionTool extends Configured implements Tool {
* If the compact once flag is not specified, execute the compaction until
* no more compactions are needed. Uses the Configuration settings provided.
*/
private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
final HRegionInfo hri, final String familyName, final boolean compactOnce,
final boolean major) throws IOException {
HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
@ -177,7 +177,7 @@ public class CompactionTool extends Configured implements Tool {
* the store dir to compact as source.
*/
private static HStore getStore(final Configuration conf, final FileSystem fs,
final Path tableDir, final TableDescriptor htd, final HRegionInfo hri,
final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
final String familyName, final Path tempDir) throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
@Override
@ -186,7 +186,7 @@ public class CompactionTool extends Configured implements Tool {
}
};
HRegion region = new HRegion(regionFs, null, conf, htd, null);
return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
}
}
View File
@ -41,12 +41,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;
@ -340,8 +340,8 @@ public class HRegionFileSystem {
* @return true if region has reference file
* @throws IOException
*/
public boolean hasReferences(final TableDescriptor htd) throws IOException {
for (ColumnFamilyDescriptor family : htd.getColumnFamilies()) {
public boolean hasReferences(final HTableDescriptor htd) throws IOException {
for (HColumnDescriptor family : htd.getFamilies()) {
if (hasReferences(family.getNameAsString())) {
return true;
}
View File
@ -50,7 +50,6 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
@ -90,7 +89,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
@ -702,11 +700,7 @@ public class HRegionServer extends HasThread implements
protected TableDescriptors getFsTableDescriptors() throws IOException {
return new FSTableDescriptors(this.conf,
this.fs, this.rootDir, !canUpdateTableDescriptor(), false, getMetaTableObserver());
}
protected Function<TableDescriptorBuilder, TableDescriptorBuilder> getMetaTableObserver() {
return null;
this.fs, this.rootDir, !canUpdateTableDescriptor(), false);
}
protected void setInitLatch(CountDownLatch latch) {
View File
@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
@ -76,7 +77,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.VersionInfoUtil;
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
@ -1859,7 +1859,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
final int regionCount = request.getOpenInfoCount();
final Map<TableName, TableDescriptor> htds = new HashMap<>(regionCount);
final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
final boolean isBulkAssign = regionCount > 1;
try {
checkOpen();
@ -1898,7 +1898,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
TableDescriptor htd;
HTableDescriptor htd;
try {
String encodedName = region.getEncodedName();
byte[] encodedNameBytes = region.getEncodedNameAsBytes();
@ -2020,7 +2020,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
RegionInfo regionInfo = request.getRegionInfo();
final HRegionInfo region = HRegionInfo.convert(regionInfo);
TableDescriptor htd;
HTableDescriptor htd;
WarmupRegionResponse response = WarmupRegionResponse.getDefaultInstance();
try {
View File
@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenMetaHandler extends OpenRegionHandler {
public OpenMetaHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
final TableDescriptor htd, long masterSystemTime) {
final RegionServerServices rsServices, HRegionInfo regionInfo,
final HTableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_META);
}
View File

@ -19,10 +19,11 @@
package org.apache.hadoop.hbase.regionserver.handler;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
/**
@ -33,7 +34,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@InterfaceAudience.Private
public class OpenPriorityRegionHandler extends OpenRegionHandler {
public OpenPriorityRegionHandler(Server server, RegionServerServices rsServices,
HRegionInfo regionInfo, TableDescriptor htd, long masterSystemTime) {
HRegionInfo regionInfo, HTableDescriptor htd, long masterSystemTime) {
super(server, rsServices, regionInfo, htd, masterSystemTime,
EventType.M_RS_OPEN_PRIORITY_REGION);
}
View File
@ -25,8 +25,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
@ -48,18 +48,18 @@ public class OpenRegionHandler extends EventHandler {
protected final RegionServerServices rsServices;
private final HRegionInfo regionInfo;
private final TableDescriptor htd;
private final HTableDescriptor htd;
private final long masterSystemTime;
public OpenRegionHandler(final Server server,
final RegionServerServices rsServices, HRegionInfo regionInfo,
TableDescriptor htd, long masterSystemTime) {
HTableDescriptor htd, long masterSystemTime) {
this(server, rsServices, regionInfo, htd, masterSystemTime, EventType.M_RS_OPEN_REGION);
}
protected OpenRegionHandler(final Server server,
final RegionServerServices rsServices, final HRegionInfo regionInfo,
final TableDescriptor htd, long masterSystemTime, EventType eventType) {
final RegionServerServices rsServices, final HRegionInfo regionInfo,
final HTableDescriptor htd, long masterSystemTime, EventType eventType) {
super(server, eventType);
this.rsServices = rsServices;
this.regionInfo = regionInfo;
View File
@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
@ -54,7 +55,6 @@ import org.apache.hadoop.hbase.client.RegionAdminServiceCallable;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.RetryingCallable;
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
@ -399,7 +399,7 @@ public class RegionReplicaReplicationEndpoint extends HBaseReplicationEndpoint {
if (requiresReplication == null) {
// check if the table requires memstore replication
// some unit-test drop the table, so we should do a bypass check and always replicate.
TableDescriptor htd = tableDescriptors.get(tableName);
HTableDescriptor htd = tableDescriptors.get(tableName);
requiresReplication = htd == null || htd.hasRegionMemstoreReplication();
memstoreReplicationEnabled.put(tableName, requiresReplication);
}
View File
@ -34,7 +34,6 @@ import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -44,6 +43,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
@ -125,7 +125,7 @@ public class RestoreSnapshotHelper {
private final SnapshotDescription snapshotDesc;
private final TableName snapshotTable;
private final TableDescriptor tableDesc;
private final HTableDescriptor tableDesc;
private final Path rootDir;
private final Path tableDir;
@ -136,7 +136,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
final TableDescriptor tableDescriptor,
final HTableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status) {
@ -146,7 +146,7 @@ public class RestoreSnapshotHelper {
public RestoreSnapshotHelper(final Configuration conf,
final FileSystem fs,
final SnapshotManifest manifest,
final TableDescriptor tableDescriptor,
final HTableDescriptor tableDescriptor,
final Path rootDir,
final ForeignExceptionDispatcher monitor,
final MonitoredTask status,
@ -265,18 +265,18 @@ public class RestoreSnapshotHelper {
*/
public static class RestoreMetaChanges {
private final Map<String, Pair<String, String> > parentsMap;
private final TableDescriptor htd;
private final HTableDescriptor htd;
private List<HRegionInfo> regionsToRestore = null;
private List<HRegionInfo> regionsToRemove = null;
private List<HRegionInfo> regionsToAdd = null;
public RestoreMetaChanges(TableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
public RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
this.parentsMap = parentsMap;
this.htd = htd;
}
public TableDescriptor getTableDescriptor() {
public HTableDescriptor getTableDescriptor() {
return htd;
}
View File
@ -36,10 +36,10 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -76,7 +76,7 @@ public final class SnapshotManifest {
private List<SnapshotRegionManifest> regionManifests;
private SnapshotDescription desc;
private TableDescriptor htd;
private HTableDescriptor htd;
private final ForeignExceptionSnare monitor;
private final Configuration conf;
@ -119,7 +119,7 @@ public final class SnapshotManifest {
/**
* Return a SnapshotManifest instance with the information already loaded in-memory.
* SnapshotManifest manifest = SnapshotManifest.open(...)
* TableDescriptor htd = manifest.getTableDescriptor()
* HTableDescriptor htd = manifest.getTableDescriptor()
* for (SnapshotRegionManifest regionManifest: manifest.getRegionManifests())
* hri = regionManifest.getRegionInfo()
* for (regionManifest.getFamilyFiles())
@ -136,7 +136,7 @@ public final class SnapshotManifest {
/**
* Add the table descriptor to the snapshot manifest
*/
public void addTableDescriptor(final TableDescriptor htd) throws IOException {
public void addTableDescriptor(final HTableDescriptor htd) throws IOException {
this.htd = htd;
}
@ -182,7 +182,7 @@ public final class SnapshotManifest {
LOG.debug("Creating references for mob files");
Path mobRegionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
// 2.1. build the snapshot reference for the store if it's a mob store
if (!hcd.isMobEnabled()) {
continue;
@ -377,7 +377,7 @@ public final class SnapshotManifest {
case SnapshotManifestV2.DESCRIPTOR_VERSION: {
SnapshotDataManifest dataManifest = readDataManifest();
if (dataManifest != null) {
htd = ProtobufUtil.toTableDescriptor(dataManifest.getTableSchema());
htd = ProtobufUtil.convertToHTableDesc(dataManifest.getTableSchema());
regionManifests = dataManifest.getRegionManifestsList();
} else {
// Compatibility, load the v1 regions
@ -429,7 +429,7 @@ public final class SnapshotManifest {
/**
* Get the table descriptor from the Snapshot
*/
public TableDescriptor getTableDescriptor() {
public HTableDescriptor getTableDescriptor() {
return this.htd;
}
@ -485,7 +485,7 @@ public final class SnapshotManifest {
}
SnapshotDataManifest.Builder dataManifestBuilder = SnapshotDataManifest.newBuilder();
dataManifestBuilder.setTableSchema(ProtobufUtil.toTableSchema(htd));
dataManifestBuilder.setTableSchema(ProtobufUtil.convertToTableSchema(htd));
if (v1Regions != null && v1Regions.size() > 0) {
dataManifestBuilder.addAllRegionManifests(v1Regions);
View File
@ -24,10 +24,11 @@ import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
import edu.umd.cs.findbugs.annotations.Nullable;
import org.apache.commons.lang.NotImplementedException;
import org.apache.commons.logging.Log;
@ -39,19 +40,17 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.com.google.common.primitives.Ints;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableInfoMissingException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.regionserver.BloomType;
/**
* Implementation of {@link TableDescriptors} that reads descriptors from the
@ -80,14 +79,10 @@ public class FSTableDescriptors implements TableDescriptors {
private volatile boolean usecache;
private volatile boolean fsvisited;
@VisibleForTesting
long cachehits = 0;
@VisibleForTesting
long invocations = 0;
@VisibleForTesting long cachehits = 0;
@VisibleForTesting long invocations = 0;
/**
* The file name prefix used to store HTD in HDFS
*/
/** The file name prefix used to store HTD in HDFS */
static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
static final String TABLEINFO_DIR = ".tabledesc";
static final String TMP_DIR = ".tmp";
@ -95,12 +90,12 @@ public class FSTableDescriptors implements TableDescriptors {
// This cache does not age out the old stuff. Thinking is that the amount
// of data we keep up in here is so small, no need to do occasional purge.
// TODO.
private final Map<TableName, TableDescriptor> cache = new ConcurrentHashMap<>();
private final Map<TableName, HTableDescriptor> cache = new ConcurrentHashMap<>();
/**
* Table descriptor for <code>hbase:meta</code> catalog table
*/
private final TableDescriptor metaTableDescriptor;
private final HTableDescriptor metaTableDescriptor;
/**
* Construct a FSTableDescriptors instance using the hbase root dir of the given
@ -112,112 +107,91 @@ public class FSTableDescriptors implements TableDescriptors {
}
public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
throws IOException {
throws IOException {
this(conf, fs, rootdir, false, true);
}
/**
* @param fsreadonly True if we are read-only when it comes to filesystem
* operations; i.e. on remove, we do not do delete in fs.
* operations; i.e. on remove, we do not do delete in fs.
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
this(conf, fs, rootdir, fsreadonly, usecache, null);
}
/**
* @param fsreadonly True if we are read-only when it comes to filesystem
* operations; i.e. on remove, we do not do delete in fs.
* @param metaObserver Used by HMaster. It need to modify the META_REPLICAS_NUM for meta table descriptor.
* see HMaster#finishActiveMasterInitialization
* TODO: This is a workaround. Should remove this ugly code...
*/
public FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache,
Function<TableDescriptorBuilder, TableDescriptorBuilder> metaObserver) throws IOException {
final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
super();
this.fs = fs;
this.rootdir = rootdir;
this.fsreadonly = fsreadonly;
this.usecache = usecache;
this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
: metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
this.metaTableDescriptor = createMetaTableDescriptor(conf);
}
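For orientation, a minimal usage sketch of the five-argument constructor and the get() lookup shown above; the configuration, filesystem, root dir, and table name are illustrative and not part of this change:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
public class FsTableDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = FSUtils.getRootDir(conf);  // hbase.rootdir from the configuration
    // Read-only, cache-enabled view of the descriptors stored under the root dir.
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir, true, true);
    HTableDescriptor htd = fstd.get(TableName.valueOf("example_table"));  // hypothetical table
    System.out.println(htd == null ? "no descriptor on disk" : htd.getTableName());
  }
}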
@VisibleForTesting
public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
return TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
.build())
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_BARRIER_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
.build())
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_POSITION_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
.build())
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.REPLICATION_META_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
.build())
.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(HConstants.TABLE_FAMILY)
// Ten is an arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true)
.build())
.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null);
}
@VisibleForTesting
public static TableDescriptor createMetaTableDescriptor(final Configuration conf)
public static HTableDescriptor createMetaTableDescriptor(final Configuration conf)
throws IOException {
return createMetaTableDescriptorBuilder(conf).build();
return new HTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
.addColumnFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true))
.addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_BARRIER_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true))
.addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_POSITION_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true))
.addColumnFamily(new HColumnDescriptor(HConstants.REPLICATION_META_FAMILY)
.setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
HConstants.DEFAULT_HBASE_META_VERSIONS))
.setInMemory(true)
.setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true))
.addColumnFamily(new HColumnDescriptor(HConstants.TABLE_FAMILY)
// Ten is an arbitrary number. Keep versions to help debugging.
.setMaxVersions(10)
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
// Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
.setBloomFilterType(BloomType.NONE)
// Enable cache of data blocks in L1 if more than one caching tier deployed:
// e.g. if using CombinedBlockCache (BucketCache).
.setCacheDataInL1(true))
.addCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
null, Coprocessor.PRIORITY_SYSTEM, null)
.build());
}
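As a side-by-side illustration (not part of this commit), this sketch shows the two descriptor styles the revert moves between: the immutable builder API on one side and the mutable HTableDescriptor/HColumnDescriptor API on the other. The table and family names are made up:
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;
public class DescriptorStylesSketch {
  public static void main(String[] args) {
    TableName name = TableName.valueOf("example_table");  // hypothetical table
    // Builder style (the API this commit reverts away from): immutable once built.
    TableDescriptor viaBuilder = TableDescriptorBuilder.newBuilder(name)
        .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f"))
            .setMaxVersions(3)
            .setInMemory(true)
            .build())
        .build();
    // HTableDescriptor/HColumnDescriptor style (the API this commit reverts back to): mutable.
    HTableDescriptor viaHtd = new HTableDescriptor(name);
    viaHtd.addFamily(new HColumnDescriptor("f").setMaxVersions(3).setInMemory(true));
    System.out.println(viaBuilder.getTableName() + " / " + viaHtd.getTableName());
  }
}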
@Override
@ -245,7 +219,7 @@ public class FSTableDescriptors implements TableDescriptors {
*/
@Override
@Nullable
public TableDescriptor get(final TableName tablename)
public HTableDescriptor get(final TableName tablename)
throws IOException {
invocations++;
if (TableName.META_TABLE_NAME.equals(tablename)) {
@ -260,13 +234,13 @@ public class FSTableDescriptors implements TableDescriptors {
if (usecache) {
// Look in cache of descriptors.
TableDescriptor cachedtdm = this.cache.get(tablename);
HTableDescriptor cachedtdm = this.cache.get(tablename);
if (cachedtdm != null) {
cachehits++;
return cachedtdm;
}
}
TableDescriptor tdmt = null;
HTableDescriptor tdmt = null;
try {
tdmt = getTableDescriptorFromFs(fs, rootdir, tablename);
} catch (NullPointerException e) {
@ -290,21 +264,21 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAllDescriptors()
public Map<String, HTableDescriptor> getAllDescriptors()
throws IOException {
Map<String, TableDescriptor> tds = new TreeMap<>();
Map<String, HTableDescriptor> tds = new TreeMap<>();
if (fsvisited && usecache) {
for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
tds.put(entry.getKey().toString(), entry.getValue());
}
// add hbase:meta to the response
tds.put(this.metaTableDescriptor.getTableName().getNameAsString(), metaTableDescriptor);
tds.put(this.metaTableDescriptor.getNameAsString(), metaTableDescriptor);
} else {
LOG.debug("Fetching table descriptors from the filesystem.");
boolean allvisited = true;
for (Path d : FSUtils.getTableDirs(fs, rootdir)) {
TableDescriptor htd = null;
HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@ -327,10 +301,10 @@ public class FSTableDescriptors implements TableDescriptors {
* Returns a map from table name to table descriptor for all tables.
*/
@Override
public Map<String, TableDescriptor> getAll() throws IOException {
Map<String, TableDescriptor> htds = new TreeMap<>();
Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
for (Map.Entry<String, TableDescriptor> entry : allDescriptors
public Map<String, HTableDescriptor> getAll() throws IOException {
Map<String, HTableDescriptor> htds = new TreeMap<>();
Map<String, HTableDescriptor> allDescriptors = getAllDescriptors();
for (Map.Entry<String, HTableDescriptor> entry : allDescriptors
.entrySet()) {
htds.put(entry.getKey(), entry.getValue());
}
@ -342,13 +316,13 @@ public class FSTableDescriptors implements TableDescriptors {
* @see #get(org.apache.hadoop.hbase.TableName)
*/
@Override
public Map<String, TableDescriptor> getByNamespace(String name)
public Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException {
Map<String, TableDescriptor> htds = new TreeMap<>();
Map<String, HTableDescriptor> htds = new TreeMap<>();
List<Path> tableDirs =
FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
for (Path d: tableDirs) {
TableDescriptor htd = null;
HTableDescriptor htd = null;
try {
htd = get(FSUtils.getTableName(d));
} catch (FileNotFoundException fnfe) {
@ -366,7 +340,7 @@ public class FSTableDescriptors implements TableDescriptors {
* and updates the local cache with it.
*/
@Override
public void add(TableDescriptor htd) throws IOException {
public void add(HTableDescriptor htd) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
}
@ -377,7 +351,7 @@ public class FSTableDescriptors implements TableDescriptors {
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
throw new NotImplementedException(
"Cannot add a table descriptor for a reserved subdirectory name: "
+ htd.getTableName().getNameAsString());
+ htd.getNameAsString());
}
updateTableDescriptor(htd);
}
@ -388,7 +362,7 @@ public class FSTableDescriptors implements TableDescriptors {
* from the FileSystem.
*/
@Override
public TableDescriptor remove(final TableName tablename)
public HTableDescriptor remove(final TableName tablename)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot remove a table descriptor - in read only mode");
@ -399,7 +373,7 @@ public class FSTableDescriptors implements TableDescriptors {
throw new IOException("Failed delete of " + tabledir.toString());
}
}
TableDescriptor descriptor = this.cache.remove(tablename);
HTableDescriptor descriptor = this.cache.remove(tablename);
return descriptor;
}
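A rough round-trip sketch of these cache-backed mutators, assuming a writable FSTableDescriptors instance; the helper name is made up for illustration:
import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
public class DescriptorRoundTripSketch {
  // add() persists the descriptor and caches it; remove() deletes the table dir and evicts the cache entry.
  static void roundTrip(FSTableDescriptors fstd, HTableDescriptor htd) throws IOException {
    fstd.add(htd);                                               // writes .tabledesc/.tableinfo.* for the table
    HTableDescriptor cached = fstd.get(htd.getTableName());      // normally served from the in-memory cache
    HTableDescriptor evicted = fstd.remove(htd.getTableName());  // throws NotImplementedException if read-only
    assert cached != null && evicted != null;
  }
}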
@ -583,7 +557,7 @@ public class FSTableDescriptors implements TableDescriptors {
* if it exists, bypassing the local cache.
* Returns null if it's not found.
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName) throws IOException {
Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
return getTableDescriptorFromFs(fs, tableDir);
@ -594,7 +568,7 @@ public class FSTableDescriptors implements TableDescriptors {
* directly from the file system if it exists.
* @throws TableInfoMissingException if there is no descriptor
*/
public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
throws IOException {
FileStatus status = getTableInfoPath(fs, tableDir, false);
if (status == null) {
@ -603,7 +577,7 @@ public class FSTableDescriptors implements TableDescriptors {
return readTableDescriptor(fs, status);
}
private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status)
throws IOException {
int len = Ints.checkedCast(status.getLen());
byte [] content = new byte[len];
@ -613,9 +587,9 @@ public class FSTableDescriptors implements TableDescriptors {
} finally {
fsDataInputStream.close();
}
TableDescriptor htd = null;
HTableDescriptor htd = null;
try {
htd = TableDescriptorBuilder.parseFrom(content);
htd = HTableDescriptor.parseFrom(content);
} catch (DeserializationException e) {
throw new IOException("content=" + Bytes.toShort(content), e);
}
@ -627,7 +601,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException Thrown if failed update.
* @throws NotImplementedException if in read only mode
*/
@VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
@VisibleForTesting Path updateTableDescriptor(HTableDescriptor td)
throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
@ -689,7 +663,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @return Descriptor file or null if we failed write.
*/
private static Path writeTableDescriptor(final FileSystem fs,
final TableDescriptor htd, final Path tableDir,
final HTableDescriptor htd, final Path tableDir,
final FileStatus currentDescriptorFile)
throws IOException {
// Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@ -744,42 +718,42 @@ public class FSTableDescriptors implements TableDescriptors {
return tableInfoDirPath;
}
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
private static void writeTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
FSDataOutputStream out = fs.create(p, false);
try {
// We used to write this file out as a serialized HTD Writable followed by two '\n's and then
// the toString version of HTD. Now we just write out the pb serialization.
out.write(TableDescriptorBuilder.toByteArray(htd));
out.write(htd.toByteArray());
} finally {
out.close();
}
}
/**
* Create new TableDescriptor in HDFS. Happens when we are creating table.
* Create new HTableDescriptor in HDFS. Happens when we are creating table.
* Used by tests.
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
return createTableDescriptor(htd, false);
}
/**
* Create new TableDescriptor in HDFS. Happens when we are creating table. If
* Create new HTableDescriptor in HDFS. Happens when we are creating table. If
* forceCreation is true then even if previous table descriptor is present it
* will be overwritten
*
* @return True if we successfully created file.
*/
public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
throws IOException {
Path tableDir = getTableDir(htd.getTableName());
return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
}
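A small sketch of the force-creation semantics, assuming an FSTableDescriptors built as in the earlier sketch; the second call fails only because a descriptor already exists, and the third succeeds because forceCreation is true:
import java.io.IOException;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
public class ForceCreateSketch {
  static void createTwice(FSTableDescriptors fstd) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));  // hypothetical
    boolean first = fstd.createTableDescriptor(htd, false);   // true: no descriptor existed yet
    boolean second = fstd.createTableDescriptor(htd, false);  // false: a descriptor is already present
    boolean forced = fstd.createTableDescriptor(htd, true);   // true: the existing descriptor is overwritten
    System.out.println(first + " " + second + " " + forced);  // expected: true false true
  }
}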
/**
* Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
* Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
* a new table or snapshot a table.
* @param tableDir table directory under which we should write the file
* @param htd description of the table to write
@ -790,7 +764,7 @@ public class FSTableDescriptors implements TableDescriptors {
* @throws IOException if a filesystem error occurs
*/
public boolean createTableDescriptorForTableDirectory(Path tableDir,
TableDescriptor htd, boolean forceCreation) throws IOException {
HTableDescriptor htd, boolean forceCreation) throws IOException {
if (fsreadonly) {
throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
}

View File

@ -17,10 +17,6 @@
*/
package org.apache.hadoop.hbase.util;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Joiner;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableList;
@ -88,9 +84,11 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.MetaTableAccessor;
@ -941,7 +939,7 @@ public class HBaseFsck extends Configured implements Closeable {
TableName tableName = hi.getTableName();
TableInfo tableInfo = tablesInfo.get(tableName);
Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
TableDescriptor template = tableInfo.getHTD();
HTableDescriptor template = tableInfo.getHTD();
// find min and max key values
Pair<byte[],byte[]> orphanRegionRange = null;
@ -1202,17 +1200,17 @@ public class HBaseFsck extends Configured implements Closeable {
*/
private void reportTablesInFlux() {
AtomicInteger numSkipped = new AtomicInteger(0);
TableDescriptor[] allTables = getTables(numSkipped);
HTableDescriptor[] allTables = getTables(numSkipped);
errors.print("Number of Tables: " + allTables.length);
if (details) {
if (numSkipped.get() > 0) {
errors.detail("Number of Tables in flux: " + numSkipped.get());
}
for (TableDescriptor td : allTables) {
for (HTableDescriptor td : allTables) {
errors.detail(" Table: " + td.getTableName() + "\t" +
(td.isReadOnly() ? "ro" : "rw") + "\t" +
(td.isMetaRegion() ? "META" : " ") + "\t" +
" families: " + td.getColumnFamilyCount());
" families: " + td.getFamilies().size());
}
}
}
@ -1316,7 +1314,7 @@ public class HBaseFsck extends Configured implements Closeable {
modTInfo = new TableInfo(tableName);
tablesInfo.put(tableName, modTInfo);
try {
TableDescriptor htd =
HTableDescriptor htd =
FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
modTInfo.htds.add(htd);
} catch (IOException ioe) {
@ -1363,17 +1361,17 @@ public class HBaseFsck extends Configured implements Closeable {
* To fabricate a .tableinfo file with the following contents<br>
* 1. the correct tablename <br>
* 2. the correct colfamily list<br>
* 3. the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
* 3. the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
private boolean fabricateTableInfo(FSTableDescriptors fstd, TableName tableName,
Set<String> columns) throws IOException {
if (columns ==null || columns.isEmpty()) return false;
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
HTableDescriptor htd = new HTableDescriptor(tableName);
for (String columnfamimly : columns) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(columnfamimly));
htd.addFamily(new HColumnDescriptor(columnfamimly));
}
fstd.createTableDescriptor(builder.build(), true);
fstd.createTableDescriptor(htd, true);
return true;
}
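To make this repair path concrete, a standalone sketch (not the HBaseFsck API itself) of fabricating a default .tableinfo from a set of column family names, using the HTableDescriptor style this commit reverts to:
import java.io.IOException;
import java.util.Set;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
public class FabricateSketch {
  // Writes a default descriptor for an orphan table whose families were discovered on disk.
  static boolean fabricate(FSTableDescriptors fstd, TableName table, Set<String> families)
      throws IOException {
    if (families == null || families.isEmpty()) {
      return false;                                    // nothing to fabricate from
    }
    HTableDescriptor htd = new HTableDescriptor(table);
    for (String family : families) {
      htd.addFamily(new HColumnDescriptor(family));    // default per-family properties
    }
    return fstd.createTableDescriptor(htd, true);      // force: overwrite any partial .tableinfo
  }
}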
@ -1398,7 +1396,7 @@ public class HBaseFsck extends Configured implements Closeable {
* 2. else create a default .tableinfo file with the following items<br>
* &nbsp;2.1 the correct tablename <br>
* &nbsp;2.2 the correct colfamily list<br>
* &nbsp;2.3 the default properties for both {@link TableDescriptor} and {@link ColumnFamilyDescriptor}<br>
* &nbsp;2.3 the default properties for both {@link HTableDescriptor} and {@link HColumnDescriptor}<br>
* @throws IOException
*/
public void fixOrphanTables() throws IOException {
@ -1406,7 +1404,7 @@ public class HBaseFsck extends Configured implements Closeable {
List<TableName> tmpList = new ArrayList<>(orphanTableDirs.keySet().size());
tmpList.addAll(orphanTableDirs.keySet());
TableDescriptor[] htds = getTableDescriptors(tmpList);
HTableDescriptor[] htds = getHTableDescriptors(tmpList);
Iterator<Entry<TableName, Set<String>>> iter =
orphanTableDirs.entrySet().iterator();
int j = 0;
@ -1419,7 +1417,7 @@ public class HBaseFsck extends Configured implements Closeable {
LOG.info("Trying to fix orphan table error: " + tableName);
if (j < htds.length) {
if (tableName.equals(htds[j].getTableName())) {
TableDescriptor htd = htds[j];
HTableDescriptor htd = htds[j];
LOG.info("fixing orphan table: " + tableName + " from cache");
fstd.createTableDescriptor(htd, true);
j++;
@ -1428,7 +1426,7 @@ public class HBaseFsck extends Configured implements Closeable {
} else {
if (fabricateTableInfo(fstd, tableName, entry.getValue())) {
LOG.warn("fixing orphan table: " + tableName + " with a default .tableinfo file");
LOG.warn("Strongly recommend to modify the TableDescriptor if necessary for: " + tableName);
LOG.warn("Strongly recommend to modify the HTableDescriptor if necessary for: " + tableName);
iter.remove();
} else {
LOG.error("Unable to create default .tableinfo for " + tableName + " while missing column family information");
@ -1465,7 +1463,7 @@ public class HBaseFsck extends Configured implements Closeable {
Path rootdir = FSUtils.getRootDir(getConf());
Configuration c = getConf();
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
@ -2648,8 +2646,8 @@ public class HBaseFsck extends Configured implements Closeable {
* regions reported for the table, but table dir is there in hdfs
*/
private void loadTableInfosForTablesWithNoRegion() throws IOException {
Map<String, TableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
for (TableDescriptor htd : allTables.values()) {
Map<String, HTableDescriptor> allTables = new FSTableDescriptors(getConf()).getAll();
for (HTableDescriptor htd : allTables.values()) {
if (checkMetaOnly && !htd.isMetaTable()) {
continue;
}
@ -2772,8 +2770,8 @@ public class HBaseFsck extends Configured implements Closeable {
// region split calculator
final RegionSplitCalculator<HbckInfo> sc = new RegionSplitCalculator<>(cmp);
// Histogram of different TableDescriptors found. Ideally there is only one!
final Set<TableDescriptor> htds = new HashSet<>();
// Histogram of different HTableDescriptors found. Ideally there is only one!
final Set<HTableDescriptor> htds = new HashSet<>();
// key = start split, values = set of splits in problem group
final Multimap<byte[], HbckInfo> overlapGroups =
@ -2790,9 +2788,9 @@ public class HBaseFsck extends Configured implements Closeable {
/**
* @return descriptor common to all regions. null if there are none or multiple!
*/
private TableDescriptor getHTD() {
private HTableDescriptor getHTD() {
if (htds.size() == 1) {
return (TableDescriptor)htds.toArray()[0];
return (HTableDescriptor)htds.toArray()[0];
} else {
LOG.error("None/Multiple table descriptors found for table '"
+ tableName + "' regions: " + htds);
@ -2962,7 +2960,7 @@ public class HBaseFsck extends Configured implements Closeable {
"First region should start with an empty key. Creating a new " +
"region and regioninfo in HDFS to plug the hole.",
getTableInfo(), next);
TableDescriptor htd = getTableInfo().getHTD();
HTableDescriptor htd = getTableInfo().getHTD();
// from special EMPTY_START_ROW to next region's startKey
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(),
HConstants.EMPTY_START_ROW, next.getStartKey());
@ -2979,7 +2977,7 @@ public class HBaseFsck extends Configured implements Closeable {
errors.reportError(ERROR_CODE.LAST_REGION_ENDKEY_NOT_EMPTY,
"Last region should end with an empty key. Creating a new "
+ "region and regioninfo in HDFS to plug the hole.", getTableInfo());
TableDescriptor htd = getTableInfo().getHTD();
HTableDescriptor htd = getTableInfo().getHTD();
// from curEndKey to EMPTY_START_ROW
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey,
HConstants.EMPTY_START_ROW);
@ -3003,7 +3001,7 @@ public class HBaseFsck extends Configured implements Closeable {
+ Bytes.toStringBinary(holeStopKey)
+ ". Creating a new regioninfo and region "
+ "dir in hdfs to plug the hole.");
TableDescriptor htd = getTableInfo().getHTD();
HTableDescriptor htd = getTableInfo().getHTD();
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), holeStartKey, holeStopKey);
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Plugged hole by creating new empty region: "+ newRegion + " " +region);
@ -3204,7 +3202,7 @@ public class HBaseFsck extends Configured implements Closeable {
}
// create new empty container region.
TableDescriptor htd = getTableInfo().getHTD();
HTableDescriptor htd = getTableInfo().getHTD();
// from start key to end Key
HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(),
range.getSecond());
@ -3505,7 +3503,7 @@ public class HBaseFsck extends Configured implements Closeable {
* @return tables that have not been modified recently
* @throws IOException if an error is encountered
*/
TableDescriptor[] getTables(AtomicInteger numSkipped) {
HTableDescriptor[] getTables(AtomicInteger numSkipped) {
List<TableName> tableNames = new ArrayList<>();
long now = EnvironmentEdgeManager.currentTime();
@ -3522,19 +3520,19 @@ public class HBaseFsck extends Configured implements Closeable {
}
}
}
return getTableDescriptors(tableNames);
return getHTableDescriptors(tableNames);
}
TableDescriptor[] getTableDescriptors(List<TableName> tableNames) {
LOG.info("getTableDescriptors == tableNames => " + tableNames);
HTableDescriptor[] getHTableDescriptors(List<TableName> tableNames) {
HTableDescriptor[] htd = new HTableDescriptor[0];
LOG.info("getHTableDescriptors == tableNames => " + tableNames);
try (Connection conn = ConnectionFactory.createConnection(getConf());
Admin admin = conn.getAdmin()) {
List<TableDescriptor> tds = admin.listTableDescriptors(tableNames);
return tds.toArray(new TableDescriptor[tds.size()]);
htd = admin.getTableDescriptorsByTableName(tableNames);
} catch (IOException e) {
LOG.debug("Exception getting table descriptors", e);
}
return new TableDescriptor[0];
return htd;
}
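For reference, a hedged sketch of fetching descriptors through Admin the way this method does; getTableDescriptorsByTableName is the pre-2.0 Admin call the revert goes back to, and listTableDescriptors is its 2.0 replacement. The table names here are hypothetical:
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
public class AdminDescriptorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    List<TableName> names = Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2"));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      HTableDescriptor[] htds = admin.getTableDescriptorsByTableName(names);
      for (HTableDescriptor htd : htds) {
        System.out.println(htd.getTableName() + " families=" + htd.getFamilies().size());
      }
    }
  }
}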
/**

View File

@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -188,7 +188,7 @@ public class HBaseFsckRepair {
* Creates, flushes, and closes a new region.
*/
public static HRegion createHDFSRegionDir(Configuration conf,
HRegionInfo hri, TableDescriptor htd) throws IOException {
HRegionInfo hri, HTableDescriptor htd) throws IOException {
// Create HRegion
Path root = FSUtils.getRootDir(conf);
HRegion region = HRegion.createHRegion(hri, root, conf, htd, null);

View File

@ -39,8 +39,9 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
/**
* Utility methods for interacting with the regions.
@ -60,13 +61,13 @@ public abstract class ModifyRegionUtils {
void editRegion(final HRegionInfo region) throws IOException;
}
public static HRegionInfo[] createHRegionInfos(TableDescriptor tableDescriptor,
public static HRegionInfo[] createHRegionInfos(HTableDescriptor hTableDescriptor,
byte[][] splitKeys) {
long regionId = System.currentTimeMillis();
HRegionInfo[] hRegionInfos = null;
if (splitKeys == null || splitKeys.length == 0) {
hRegionInfos = new HRegionInfo[]{
new HRegionInfo(tableDescriptor.getTableName(), null, null, false, regionId)
new HRegionInfo(hTableDescriptor.getTableName(), null, null, false, regionId)
};
} else {
int numRegions = splitKeys.length + 1;
@ -76,7 +77,7 @@ public abstract class ModifyRegionUtils {
for (int i = 0; i < numRegions; i++) {
endKey = (i == splitKeys.length) ? null : splitKeys[i];
hRegionInfos[i] =
new HRegionInfo(tableDescriptor.getTableName(), startKey, endKey,
new HRegionInfo(hTableDescriptor.getTableName(), startKey, endKey,
false, regionId);
startKey = endKey;
}
@ -90,20 +91,20 @@ public abstract class ModifyRegionUtils {
*
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
"RegionOpenAndInitThread-" + tableDescriptor.getTableName(), regionNumber);
"RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
try {
return createRegions(exec, conf, rootDir, tableDescriptor, newRegions, task);
return createRegions(exec, conf, rootDir, hTableDescriptor, newRegions, task);
} finally {
exec.shutdownNow();
}
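As a usage sketch (the root dir, table layout, and split key are illustrative), this is roughly how a caller lays down on-disk regions for a new table through these helpers, with the RegionFillTask hook left null:
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
public class CreateRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = FSUtils.getRootDir(conf);
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));  // hypothetical
    htd.addFamily(new HColumnDescriptor("f"));
    byte[][] splitKeys = { Bytes.toBytes("m") };  // two regions: one ending at "m", one starting there
    HRegionInfo[] newRegions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
    // Writes the region directories under rootDir; no custom fill task is run.
    List<HRegionInfo> created = ModifyRegionUtils.createRegions(conf, rootDir, htd, newRegions, null);
    System.out.println("created " + created.size() + " regions");
  }
}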
@ -116,15 +117,15 @@ public abstract class ModifyRegionUtils {
* @param exec Thread Pool Executor
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param hTableDescriptor description of the table
* @param newRegions {@link HRegionInfo} that describes the regions to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
final Configuration conf, final Path rootDir,
final TableDescriptor tableDescriptor, final HRegionInfo[] newRegions,
final RegionFillTask task) throws IOException {
final Configuration conf, final Path rootDir,
final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
final RegionFillTask task) throws IOException {
if (newRegions == null) return null;
int regionNumber = newRegions.length;
CompletionService<HRegionInfo> completionService = new ExecutorCompletionService<>(exec);
@ -133,7 +134,7 @@ public abstract class ModifyRegionUtils {
completionService.submit(new Callable<HRegionInfo>() {
@Override
public HRegionInfo call() throws IOException {
return createRegion(conf, rootDir, tableDescriptor, newRegion, task);
return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
}
});
}
@ -155,20 +156,20 @@ public abstract class ModifyRegionUtils {
* Create new set of regions on the specified file-system.
* @param conf {@link Configuration}
* @param rootDir Root directory for HBase instance
* @param tableDescriptor description of the table
* @param hTableDescriptor description of the table
* @param newRegion {@link HRegionInfo} that describes the region to create
* @param task {@link RegionFillTask} custom code to populate region after creation
* @throws IOException
*/
public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
final TableDescriptor tableDescriptor, final HRegionInfo newRegion,
final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
final RegionFillTask task) throws IOException {
// 1. Create HRegion
// The WAL subsystem will use the default rootDir rather than the passed in rootDir
// unless I pass along via the conf.
Configuration confForWAL = new Configuration(conf);
confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, tableDescriptor, null, false);
HRegion region = HRegion.createHRegion(newRegion, rootDir, conf, hTableDescriptor, null, false);
try {
// 2. Custom user code to interact with the created region
if (task != null) {

View File

@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.ImmutableHTableDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
@ -467,20 +466,10 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
/**
* @return META table descriptor
* @deprecated since 2.0 version and will be removed in 3.0 version.
* use {@link #getMetaDescriptor()}
*/
@Deprecated
public HTableDescriptor getMetaTableDescriptor() {
return new ImmutableHTableDescriptor(getMetaTableDescriptorBuilder().build());
}
/**
* @return META table descriptor
*/
public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
try {
return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
} catch (IOException e) {
throw new RuntimeException("Unable to create META table descriptor", e);
}

View File

@ -25,13 +25,10 @@ import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.junit.Rule;
import org.junit.Test;
import org.junit.*;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
@ -49,9 +46,9 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
assertTrue("Should create new table descriptor",
fstd.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(), false));
assertTrue("Should create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@Test
@ -62,7 +59,7 @@ public class TestFSTableDescriptorForceCreation {
// Cleanup old tests if any detritus laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
fstd.add(htd);
assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
}
@ -74,7 +71,7 @@ public class TestFSTableDescriptorForceCreation {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
fstd.createTableDescriptor(htd, false);
assertTrue("Should create new table descriptor",
fstd.createTableDescriptor(htd, true));

View File

@ -24,8 +24,6 @@ import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@ -154,22 +152,22 @@ public class TestHColumnDescriptorDefaultVersions {
Admin admin = TEST_UTIL.getAdmin();
// Verify descriptor from master
TableDescriptor htd = admin.listTableDescriptor(tableName);
ColumnFamilyDescriptor[] hcds = htd.getColumnFamilies();
HTableDescriptor htd = admin.getTableDescriptor(tableName);
HColumnDescriptor[] hcds = htd.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
HTableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
hcds = td.getColumnFamilies();
verifyHColumnDescriptor(expected, hcds, tableName, families);
}
private void verifyHColumnDescriptor(int expected, final ColumnFamilyDescriptor[] hcds,
private void verifyHColumnDescriptor(int expected, final HColumnDescriptor[] hcds,
final TableName tableName,
final byte[]... families) {
for (ColumnFamilyDescriptor hcd : hcds) {
for (HColumnDescriptor hcd : hcds) {
assertEquals(expected, hcd.getMaxVersions());
}
}

View File

@ -38,11 +38,14 @@ import java.util.regex.Pattern;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -748,7 +751,7 @@ public class TestAsyncTableAdminApi extends TestAsyncAdminBase {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
TableDescriptor td =
HTableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}

View File

@ -23,14 +23,15 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.executor.ExecutorService;
@ -74,7 +75,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long createTable(
final TableDescriptor desc,
final HTableDescriptor desc,
final byte[][] splitKeys,
final long nonceGroup,
final long nonce) throws IOException {
@ -83,7 +84,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
public long createSystemTable(final TableDescriptor tableDescriptor) throws IOException {
public long createSystemTable(final HTableDescriptor hTableDescriptor) throws IOException {
return -1;
}
@ -266,7 +267,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
@Override
public long modifyTable(
final TableName tableName,
final TableDescriptor descriptor,
final HTableDescriptor descriptor,
final long nonceGroup,
final long nonce) throws IOException {
return -1;
@ -289,13 +290,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
public long addColumn(final TableName tableName, final ColumnFamilyDescriptor columnDescriptor,
public long addColumn(final TableName tableName, final HColumnDescriptor columnDescriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}
@Override
public long modifyColumn(final TableName tableName, final ColumnFamilyDescriptor descriptor,
public long modifyColumn(final TableName tableName, final HColumnDescriptor descriptor,
final long nonceGroup, final long nonce) throws IOException {
return -1;
}

View File

@ -24,19 +24,19 @@ import java.util.NavigableMap;
import java.util.SortedSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
@ -300,36 +300,36 @@ public class MockMasterServices extends MockNoopMasterServices {
public TableDescriptors getTableDescriptors() {
return new TableDescriptors() {
@Override
public TableDescriptor remove(TableName tablename) throws IOException {
public HTableDescriptor remove(TableName tablename) throws IOException {
// noop
return null;
}
@Override
public Map<String, TableDescriptor> getAll() throws IOException {
public Map<String, HTableDescriptor> getAll() throws IOException {
// noop
return null;
}
@Override public Map<String, TableDescriptor> getAllDescriptors() throws IOException {
@Override public Map<String, HTableDescriptor> getAllDescriptors() throws IOException {
// noop
return null;
}
@Override
public TableDescriptor get(TableName tablename) throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tablename);
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(DEFAULT_COLUMN_FAMILY_NAME));
return builder.build();
public HTableDescriptor get(TableName tablename) throws IOException {
HTableDescriptor htd = new HTableDescriptor(tablename);
htd.addFamily(new HColumnDescriptor(DEFAULT_COLUMN_FAMILY_NAME));
return htd;
}
@Override
public Map<String, TableDescriptor> getByNamespace(String name) throws IOException {
public Map<String, HTableDescriptor> getByNamespace(String name) throws IOException {
return null;
}
@Override
public void add(TableDescriptor htd) throws IOException {
public void add(HTableDescriptor htd) throws IOException {
// noop
}

View File

@ -33,23 +33,20 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterMetaBootstrap;
@ -64,7 +61,6 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.MD5Hash;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@InterfaceAudience.Private
public class MasterProcedureTestingUtility {
private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);
@ -140,17 +136,17 @@ public class MasterProcedureTestingUtility {
// ==========================================================================
// Table Helpers
// ==========================================================================
public static TableDescriptor createHTD(final TableName tableName, final String... family) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
public static HTableDescriptor createHTD(final TableName tableName, final String... family) {
HTableDescriptor htd = new HTableDescriptor(tableName);
for (int i = 0; i < family.length; ++i) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family[i]));
htd.addFamily(new HColumnDescriptor(family[i]));
}
return builder.build();
return htd;
}
public static HRegionInfo[] createTable(final ProcedureExecutor<MasterProcedureEnv> procExec,
final TableName tableName, final byte[][] splitKeys, String... family) throws IOException {
TableDescriptor htd = createHTD(tableName, family);
HTableDescriptor htd = createHTD(tableName, family);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = ProcedureTestingUtility.submitAndWait(procExec,
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@ -198,12 +194,12 @@ public class MasterProcedureTestingUtility {
assertEquals(regions.length, countMetaRegions(master, tableName));
// check htd
TableDescriptor htd = master.getTableDescriptors().get(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue("table descriptor not found", htd != null);
for (int i = 0; i < family.length; ++i) {
assertTrue("family not found " + family[i], htd.getColumnFamily(Bytes.toBytes(family[i])) != null);
assertTrue("family not found " + family[i], htd.getFamily(Bytes.toBytes(family[i])) != null);
}
assertEquals(family.length, htd.getColumnFamilyCount());
assertEquals(family.length, htd.getFamilies().size());
}
public static void validateTableDeletion(
@ -271,18 +267,18 @@ public class MasterProcedureTestingUtility {
public static void validateColumnFamilyAddition(final HMaster master, final TableName tableName,
final String family) throws IOException {
TableDescriptor htd = master.getTableDescriptors().get(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
assertTrue(htd.hasColumnFamily(family.getBytes()));
assertTrue(htd.hasFamily(family.getBytes()));
}
public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName,
final String family) throws IOException {
// verify htd
TableDescriptor htd = master.getTableDescriptors().get(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
assertFalse(htd.hasColumnFamily(family.getBytes()));
assertFalse(htd.hasFamily(family.getBytes()));
// verify fs
final FileSystem fs = master.getMasterFileSystem().getFileSystem();
@ -294,13 +290,13 @@ public class MasterProcedureTestingUtility {
}
public static void validateColumnFamilyModification(final HMaster master,
final TableName tableName, final String family, ColumnFamilyDescriptor columnDescriptor)
final TableName tableName, final String family, HColumnDescriptor columnDescriptor)
throws IOException {
TableDescriptor htd = master.getTableDescriptors().get(tableName);
HTableDescriptor htd = master.getTableDescriptors().get(tableName);
assertTrue(htd != null);
ColumnFamilyDescriptor hcfd = htd.getColumnFamily(family.getBytes());
assertEquals(0, ColumnFamilyDescriptor.COMPARATOR.compare(hcfd, columnDescriptor));
HColumnDescriptor hcfd = htd.getFamily(family.getBytes());
assertTrue(hcfd.equals(columnDescriptor));
}
public static void loadData(final Connection connection, final TableName tableName,

View File

@ -22,11 +22,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@ -78,11 +76,10 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
final TableName tableName = TableName.valueOf(name.getMethodName());
// create table with 0 families will fail
final TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName));
final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName);
// disable sanity check
builder.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
TableDescriptor htd = builder.build();
htd.setConfiguration("hbase.table.sanity.checks", Boolean.FALSE.toString());
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
long procId =
@ -99,7 +96,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
public void testCreateExisting() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f");
final HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
// create the table
@ -128,7 +125,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@ -141,21 +138,18 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
@Test(timeout=90000)
public void testRollbackAndDoubleExecution() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
testRollbackAndDoubleExecution(TableDescriptorBuilder.newBuilder(MasterProcedureTestingUtility.createHTD(tableName, F1, F2)));
testRollbackAndDoubleExecution(MasterProcedureTestingUtility.createHTD(tableName, F1, F2));
}
@Test(timeout=90000)
public void testRollbackAndDoubleExecutionOnMobTable() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd)
.modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(htd.getColumnFamily(Bytes.toBytes(F1)))
.setMobEnabled(true)
.build());
testRollbackAndDoubleExecution(builder);
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, F1, F2);
htd.getFamily(Bytes.toBytes(F1)).setMobEnabled(true);
testRollbackAndDoubleExecution(htd);
}
private void testRollbackAndDoubleExecution(TableDescriptorBuilder builder) throws Exception {
private void testRollbackAndDoubleExecution(HTableDescriptor htd) throws Exception {
// create the table
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
@ -164,8 +158,7 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
final byte[][] splitKeys = new byte[][] {
Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c")
};
builder.setRegionReplication(3);
TableDescriptor htd = builder.build();
htd.setRegionReplication(3);
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));
@ -188,9 +181,9 @@ public class TestCreateTableProcedure extends TestTableDDLProcedureBase {
splitKeys[i] = Bytes.toBytes(String.format("%08d", i));
}
final TableDescriptor htd = MasterProcedureTestingUtility.createHTD(
final HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(
TableName.valueOf("TestMRegions"), F1, F2);
UTIL.getAdmin().createTableAsync(htd, splitKeys)
UTIL.getHBaseAdmin().createTableAsync(htd, splitKeys)
.get(10, java.util.concurrent.TimeUnit.HOURS);
LOG.info("TABLE CREATED");
}

View File

@ -27,8 +27,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@ -112,7 +112,7 @@ public class TestMasterFailoverWithProcedures {
// Start the Create procedure && kill the executor
byte[][] splitKeys = null;
TableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(tableName, "f1", "f2");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, splitKeys);
long procId = procExec.submitProcedure(
new CreateTableProcedure(procExec.getEnvironment(), htd, regions));

View File

@ -31,8 +31,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
@ -135,7 +135,7 @@ public class TestMasterProcedureWalLease {
backupStore3.recoverLease();
// Try to trigger a command on the master (WAL lease expired on the active one)
TableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
HTableDescriptor htd = MasterProcedureTestingUtility.createHTD(TableName.valueOf(name.getMethodName()), "f");
HRegionInfo[] regions = ModifyRegionUtils.createHRegionInfos(htd, null);
LOG.debug("submit proc");
try {

View File

@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.InvalidFamilyOperationException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -275,14 +274,14 @@ public class TestTableDescriptorModificationFromClient {
// Verify descriptor from HDFS
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
TableDescriptor td =
HTableDescriptor td =
FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
verifyTableDescriptor(td, tableName, families);
}
private void verifyTableDescriptor(final TableDescriptor htd,
private void verifyTableDescriptor(final HTableDescriptor htd,
final TableName tableName, final byte[]... families) {
Set<byte[]> htdFamilies = htd.getColumnFamilyNames();
Set<byte[]> htdFamilies = htd.getFamiliesKeys();
assertEquals(tableName, htd.getTableName());
assertEquals(families.length, htdFamilies.size());
for (byte[] familyName: families) {

View File

@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.Type;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
@ -515,7 +514,7 @@ public class TestPartitionedMobCompactor {
CacheConfig cacheConfig = null;
MyPartitionedMobCompactor(Configuration conf, FileSystem fs, TableName tableName,
ColumnFamilyDescriptor column, ExecutorService pool, final int delPartitionSize,
HColumnDescriptor column, ExecutorService pool, final int delPartitionSize,
final CacheConfig cacheConf, final int PartitionsIncludeDelFiles)
throws IOException {
super(conf, fs, tableName, column, pool);

View File

@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -80,11 +79,10 @@ public class TestGetClosestAtOrBefore {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = UTIL.getDataTestDirOnTestFS();
// Up flush size else we bind up when we use default catalog flush of 16k.
TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder()
.setMemStoreFlushSize(64 * 1024 * 1024);
UTIL.getMetaTableDescriptor().setMemStoreFlushSize(64 * 1024 * 1024);
Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
rootdir, this.conf, metaBuilder.build());
rootdir, this.conf, UTIL.getMetaTableDescriptor());
try {
// Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) {

View File

@ -39,22 +39,22 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.CoordinatedStateManager;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterRpcServices;
@ -207,7 +207,7 @@ public class TestRegionMergeTransactionOnCluster {
List<Pair<HRegionInfo, ServerName>> tableRegions = MetaTableAccessor
.getTableRegionsAndLocations(MASTER.getConnection(), tableName);
HRegionInfo mergedRegionInfo = tableRegions.get(0).getFirst();
TableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
HTableDescriptor tableDescriptor = MASTER.getTableDescriptors().get(
tableName);
Result mergedRegionResult = MetaTableAccessor.getRegionResult(
MASTER.getConnection(), mergedRegionInfo.getRegionName());
@ -231,11 +231,11 @@ public class TestRegionMergeTransactionOnCluster {
assertTrue(fs.exists(regionAdir));
assertTrue(fs.exists(regionBdir));
ColumnFamilyDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
HColumnDescriptor[] columnFamilies = tableDescriptor.getColumnFamilies();
HRegionFileSystem hrfs = new HRegionFileSystem(
TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
int count = 0;
for(ColumnFamilyDescriptor colFamily : columnFamilies) {
for(HColumnDescriptor colFamily : columnFamilies) {
count += hrfs.getStoreFiles(colFamily.getName()).size();
}
ADMIN.compactRegion(mergedRegionInfo.getRegionName());
@ -244,7 +244,7 @@ public class TestRegionMergeTransactionOnCluster {
long timeout = System.currentTimeMillis() + waitTime;
int newcount = 0;
while (System.currentTimeMillis() < timeout) {
for(ColumnFamilyDescriptor colFamily : columnFamilies) {
for(HColumnDescriptor colFamily : columnFamilies) {
newcount += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount > count) {
@ -263,7 +263,7 @@ public class TestRegionMergeTransactionOnCluster {
}
while (System.currentTimeMillis() < timeout) {
int newcount1 = 0;
for(ColumnFamilyDescriptor colFamily : columnFamilies) {
for(HColumnDescriptor colFamily : columnFamilies) {
newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
}
if(newcount1 <= 1) {

View File

@ -26,13 +26,13 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@ -261,7 +261,7 @@ public class TestRegionServerNoMaster {
hri.getEncodedNameAsBytes()));
// Let's start the open handler
TableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
HTableDescriptor htd = getRS().tableDescriptors.get(hri.getTable());
getRS().service.submit(new OpenRegionHandler(getRS(), getRS(), hri, htd, -1));

View File

@ -36,20 +36,20 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@ -652,30 +652,31 @@ public class SecureTestUtil {
public static Table createTable(HBaseTestingUtility testUtil, TableName tableName,
byte[][] families) throws Exception {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
HTableDescriptor htd = new HTableDescriptor(tableName);
for (byte[] family : families) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
HColumnDescriptor hcd = new HColumnDescriptor(family);
htd.addFamily(hcd);
}
createTable(testUtil, testUtil.getAdmin(), builder.build());
return testUtil.getConnection().getTable(tableName);
createTable(testUtil, testUtil.getAdmin(), htd);
return testUtil.getConnection().getTable(htd.getTableName());
}
public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd)
public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd)
throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd);
}
public static void createTable(HBaseTestingUtility testUtil, TableDescriptor htd,
public static void createTable(HBaseTestingUtility testUtil, HTableDescriptor htd,
byte[][] splitKeys) throws Exception {
createTable(testUtil, testUtil.getAdmin(), htd, splitKeys);
}
public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd)
public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd)
throws Exception {
createTable(testUtil, admin, htd, null);
}
public static void createTable(HBaseTestingUtility testUtil, Admin admin, TableDescriptor htd,
public static void createTable(HBaseTestingUtility testUtil, Admin admin, HTableDescriptor htd,
byte[][] splitKeys) throws Exception {
// NOTE: We need a latch because admin is not sync,
// so the postOp coprocessor method may be called after the admin operation returned.

View File

@ -24,20 +24,18 @@ import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Assert;
public class MobSnapshotTestingUtils {
@ -62,17 +60,15 @@ public class MobSnapshotTestingUtils {
private static void createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[][] splitKeys, int regionReplication,
final byte[]... families) throws IOException, InterruptedException {
TableDescriptorBuilder builder
= TableDescriptorBuilder.newBuilder(tableName)
.setRegionReplication(regionReplication);
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.setRegionReplication(regionReplication);
for (byte[] family : families) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder
.newBuilder(family)
.setMobEnabled(true)
.setMobThreshold(0L)
.build());
HColumnDescriptor hcd = new HColumnDescriptor(family);
hcd.setMobEnabled(true);
hcd.setMobThreshold(0L);
htd.addFamily(hcd);
}
util.getAdmin().createTable(builder.build(), splitKeys);
util.getAdmin().createTable(htd, splitKeys);
SnapshotTestingUtils.waitForTableToBeOnline(util, tableName);
assertEquals((splitKeys.length + 1) * regionReplication, util
.getAdmin().getTableRegions(tableName).size());
@ -84,29 +80,29 @@ public class MobSnapshotTestingUtils {
* @param util
* @param tableName
* @param families
* @return An Table instance for the created table.
* @return An HTable instance for the created table.
* @throws IOException
*/
public static Table createMobTable(final HBaseTestingUtility util,
final TableName tableName, final byte[]... families) throws IOException {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
HTableDescriptor htd = new HTableDescriptor(tableName);
for (byte[] family : families) {
HColumnDescriptor hcd = new HColumnDescriptor(family);
// Disable blooms (they are on by default as of 0.95) but we disable them
// here because
// tests have hard coded counts of what to expect in block cache, etc.,
// and blooms being
// on is interfering.
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
.setBloomFilterType(BloomType.NONE)
.setMobEnabled(true)
.setMobThreshold(0L)
.build());
hcd.setBloomFilterType(BloomType.NONE);
hcd.setMobEnabled(true);
hcd.setMobThreshold(0L);
htd.addFamily(hcd);
}
util.getAdmin().createTable(builder.build());
util.getAdmin().createTable(htd);
// HBaseAdmin only waits for regions to appear in hbase:meta we should wait
// until they are assigned
util.waitUntilAllRegionsAssigned(tableName);
return ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
util.waitUntilAllRegionsAssigned(htd.getTableName());
return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
}
/**
@ -150,14 +146,13 @@ public class MobSnapshotTestingUtils {
}
@Override
public TableDescriptor createHtd(final String tableName) {
return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.addColumnFamily(ColumnFamilyDescriptorBuilder
.newBuilder(Bytes.toBytes(TEST_FAMILY))
.setMobEnabled(true)
.setMobThreshold(0L)
.build())
.build();
public HTableDescriptor createHtd(final String tableName) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMobEnabled(true);
hcd.setMobThreshold(0L);
htd.addFamily(hcd);
return htd;
}
}
}
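
The restored createMobTable path expresses the same recipe as the builder version, only with mutable descriptors. A minimal, self-contained sketch of both flavours (the surrounding class and method names are assumptions for illustration; the settings mirror the hunk above):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class MobTableSketch {

      // Builder flavour: each family is assembled immutably, then attached to the table builder.
      static void createWithBuilder(Admin admin, TableName name, byte[] family) throws IOException {
        admin.createTable(TableDescriptorBuilder.newBuilder(name)
            .addColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
                .setBloomFilterType(BloomType.NONE) // blooms off so block-cache counts stay predictable
                .setMobEnabled(true)
                .setMobThreshold(0L)                // threshold 0: every cell lands in MOB files
                .build())
            .build());
      }

      // HTableDescriptor flavour restored by the revert: the same settings, mutated in place.
      static void createWithHTableDescriptor(Admin admin, TableName name, byte[] family) throws IOException {
        HTableDescriptor htd = new HTableDescriptor(name);
        HColumnDescriptor hcd = new HColumnDescriptor(family);
        hcd.setBloomFilterType(BloomType.NONE);
        hcd.setMobEnabled(true);
        hcd.setMobThreshold(0L);
        htd.addFamily(hcd);
        admin.createTable(htd);
      }
    }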

View File

@ -40,35 +40,36 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSVisitor;
@ -491,7 +492,7 @@ public final class SnapshotTestingUtils {
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final SnapshotProtos.SnapshotDescription desc;
private final TableDescriptor htd;
private final HTableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
private final Path rootDir;
@ -499,7 +500,7 @@ public final class SnapshotTestingUtils {
private int snapshotted = 0;
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
final Path rootDir, final TableDescriptor htd,
final Path rootDir, final HTableDescriptor htd,
final SnapshotProtos.SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
@ -513,7 +514,7 @@ public final class SnapshotTestingUtils {
.createTableDescriptorForTableDirectory(snapshotDir, htd, false);
}
public TableDescriptor getTableDescriptor() {
public HTableDescriptor getTableDescriptor() {
return this.htd;
}
@ -679,11 +680,11 @@ public final class SnapshotTestingUtils {
private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
final int numRegions, final int version) throws IOException {
TableDescriptor htd = createHtd(tableName);
HTableDescriptor htd = createHtd(tableName);
RegionData[] regions = createTable(htd, numRegions);
SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription.newBuilder()
.setTable(htd.getTableName().getNameAsString())
.setTable(htd.getNameAsString())
.setName(snapshotName)
.setVersion(version)
.build();
@ -693,13 +694,13 @@ public final class SnapshotTestingUtils {
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
}
public TableDescriptor createHtd(final String tableName) {
return TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
.addColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY))
.build();
public HTableDescriptor createHtd(final String tableName) {
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName));
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
return htd;
}
private RegionData[] createTable(final TableDescriptor htd, final int nregions)
private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
@ -765,15 +766,14 @@ public final class SnapshotTestingUtils {
public static void createTable(final HBaseTestingUtility util, final TableName tableName,
int regionReplication, int nRegions, final byte[]... families)
throws IOException, InterruptedException {
TableDescriptorBuilder builder
= TableDescriptorBuilder
.newBuilder(tableName)
.setRegionReplication(regionReplication);
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.setRegionReplication(regionReplication);
for (byte[] family : families) {
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
HColumnDescriptor hcd = new HColumnDescriptor(family);
htd.addFamily(hcd);
}
byte[][] splitKeys = getSplitKeys(nRegions);
util.createTable(builder.build(), splitKeys);
util.createTable(htd, splitKeys);
assertEquals((splitKeys.length + 1) * regionReplication,
util.getAdmin().getTableRegions(tableName).size());
}
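
The region-count assertion above rests on a small piece of arithmetic: n split keys produce n + 1 key ranges, and each range is materialized once per replica, giving (splitKeys.length + 1) * regionReplication regions in total. A hedged sketch of that setup (the class, table name, family name, and split keys are placeholders, not from this commit):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReplicatedTableSketch {

      // Two split keys and three replicas per region: 3 ranges x 3 replicas = 9 regions.
      static void createReplicatedTable(HBaseTestingUtility util)
          throws IOException, InterruptedException {
        TableName tableName = TableName.valueOf("replicated");
        HTableDescriptor htd = new HTableDescriptor(tableName);
        htd.setRegionReplication(3);
        htd.addFamily(new HColumnDescriptor(Bytes.toBytes("f")));
        byte[][] splitKeys = new byte[][] { Bytes.toBytes("b"), Bytes.toBytes("m") };
        util.createTable(htd, splitKeys);
        int expected = (splitKeys.length + 1) * 3;   // 9, replicas included
        int actual = util.getAdmin().getTableRegions(tableName).size();
        assert actual == expected;                   // the real tests use JUnit's assertEquals
      }
    }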

View File

@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@ -104,11 +104,11 @@ public class TestRestoreSnapshotHelper {
builder.addRegionV2();
builder.addRegionV1();
Path snapshotDir = builder.commit();
TableDescriptor htd = builder.getTableDescriptor();
HTableDescriptor htd = builder.getTableDescriptor();
SnapshotDescription desc = builder.getSnapshotDescription();
// Test clone a snapshot
TableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
testRestore(snapshotDir, desc, htdClone);
verifyRestore(rootDir, htd, htdClone);
@ -118,13 +118,13 @@ public class TestRestoreSnapshotHelper {
.setTable("testtb-clone")
.build();
Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
TableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
testRestore(cloneDir, cloneDesc, htdClone2);
verifyRestore(rootDir, htd, htdClone2);
}
private void verifyRestore(final Path rootDir, final TableDescriptor sourceHtd,
final TableDescriptor htdClone) throws IOException {
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
final HTableDescriptor htdClone) throws IOException {
List<String> files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.size());
@ -148,7 +148,7 @@ public class TestRestoreSnapshotHelper {
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
private void testRestore(final Path snapshotDir, final SnapshotDescription sd,
final TableDescriptor htdClone) throws IOException {
final HTableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
@ -164,7 +164,7 @@ public class TestRestoreSnapshotHelper {
* Initialize the restore helper, based on the snapshot and table information provided.
*/
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
final SnapshotDescription sd, final TableDescriptor htdClone) throws IOException {
final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
MonitoredTask status = Mockito.mock(MonitoredTask.class);

View File

@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
@ -129,7 +129,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder =
SnapshotRegionManifest.newBuilder();
for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));
@ -150,7 +150,7 @@ public class TestSnapshotManifest {
}
dataManifestBuilder
.setTableSchema(ProtobufUtil.toTableSchema(builder.getTableDescriptor()));
.setTableSchema(ProtobufUtil.convertToTableSchema(builder.getTableDescriptor()));
SnapshotDataManifest dataManifest = dataManifestBuilder.build();
return writeDataManifest(dataManifest);
@ -163,7 +163,7 @@ public class TestSnapshotManifest {
SnapshotRegionManifest.Builder dataRegionManifestBuilder = SnapshotRegionManifest.newBuilder();
dataRegionManifestBuilder.setRegionInfo(HRegionInfo.convert(regionInfo));
for (ColumnFamilyDescriptor hcd: builder.getTableDescriptor().getColumnFamilies()) {
for (HColumnDescriptor hcd: builder.getTableDescriptor().getFamilies()) {
SnapshotRegionManifest.FamilyFiles.Builder family =
SnapshotRegionManifest.FamilyFiles.newBuilder();
family.setFamilyName(UnsafeByteOperations.unsafeWrap(hcd.getName()));

View File

@ -40,12 +40,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
@ -79,7 +78,7 @@ public class TestFSTableDescriptors {
@Test
public void testCreateAndUpdate() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
@ -99,7 +98,7 @@ public class TestFSTableDescriptors {
@Test
public void testSequenceIdAdvancesOnTableInfo() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
Path p0 = fstd.updateTableDescriptor(htd);
@ -119,7 +118,7 @@ public class TestFSTableDescriptors {
assertTrue(!fs.exists(p2));
int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
assertTrue(i3 == i2 + 1);
TableDescriptor descriptor = fstd.get(htd.getTableName());
HTableDescriptor descriptor = fstd.get(htd.getTableName());
assertEquals(descriptor, htd);
}
@ -162,7 +161,7 @@ public class TestFSTableDescriptors {
// Cleanup old tests if any detrius laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
htds.add(htd);
assertNotNull(htds.remove(htd.getTableName()));
assertNull(htds.remove(htd.getTableName()));
@ -171,11 +170,11 @@ public class TestFSTableDescriptors {
@Test public void testReadingHTDFromFS() throws IOException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
fstd.createTableDescriptor(htd);
TableDescriptor td2 =
HTableDescriptor td2 =
FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
assertTrue(htd.equals(td2));
}
@ -185,25 +184,25 @@ public class TestFSTableDescriptors {
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = UTIL.getDataTestDir(name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
Path descriptorFile = fstd.updateTableDescriptor(htd);
try (FSDataOutputStream out = fs.create(descriptorFile, true)) {
out.write(TableDescriptorBuilder.toByteArray(htd));
out.write(htd.toByteArray());
}
FSTableDescriptors fstd2 = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor td2 = fstd2.get(htd.getTableName());
HTableDescriptor td2 = fstd2.get(htd.getTableName());
assertEquals(htd, td2);
FileStatus descriptorFile2 =
FSTableDescriptors.getTableInfoPath(fs, fstd2.getTableDir(htd.getTableName()));
byte[] buffer = TableDescriptorBuilder.toByteArray(htd);
byte[] buffer = htd.toByteArray();
try (FSDataInputStream in = fs.open(descriptorFile2.getPath())) {
in.readFully(buffer);
}
TableDescriptor td3 = TableDescriptorBuilder.parseFrom(buffer);
HTableDescriptor td3 = HTableDescriptor.parseFrom(buffer);
assertEquals(htd, td3);
}
@Test public void testTableDescriptors()
@Test public void testHTableDescriptors()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@ -211,7 +210,7 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
@Override
public TableDescriptor get(TableName tablename)
public HTableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info(tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
@ -220,7 +219,9 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
HTableDescriptor htd = new HTableDescriptor(
new HTableDescriptor(TableName.valueOf(name + i)));
htds.createTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
@ -231,9 +232,9 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
htds.updateTableDescriptor(builder.build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htd.addFamily(new HColumnDescriptor("" + i));
htds.updateTableDescriptor(htd);
}
// Wait a while so mod time we write is for sure different.
Thread.sleep(100);
@ -249,7 +250,7 @@ public class TestFSTableDescriptors {
}
@Test
public void testTableDescriptorsNoCache()
public void testHTableDescriptorsNoCache()
throws IOException, InterruptedException {
final String name = this.name.getMethodName();
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
@ -260,7 +261,8 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htds.createTableDescriptor(htd);
}
for (int i = 0; i < 2 * count; i++) {
@ -268,14 +270,14 @@ public class TestFSTableDescriptors {
}
// Update the table infos
for (int i = 0; i < count; i++) {
TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i));
builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of("" + i));
htds.updateTableDescriptor(builder.build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htd.addFamily(new HColumnDescriptor("" + i));
htds.updateTableDescriptor(htd);
}
for (int i = 0; i < count; i++) {
assertNotNull("Expected HTD, got null instead", htds.get(TableName.valueOf(name + i)));
assertTrue("Column Family " + i + " missing",
htds.get(TableName.valueOf(name + i)).hasColumnFamily(Bytes.toBytes("" + i)));
htds.get(TableName.valueOf(name + i)).hasFamily(Bytes.toBytes("" + i)));
}
assertEquals(count * 4, htds.invocations);
assertEquals("expected=0, actual=" + htds.cachehits, 0, htds.cachehits);
@ -292,10 +294,12 @@ public class TestFSTableDescriptors {
final int count = 4;
// Write out table infos.
for (int i = 0; i < count; i++) {
htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
htds.createTableDescriptor(htd);
}
// add hbase:meta
htds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build());
HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
htds.createTableDescriptor(htd);
assertEquals("getAll() didn't return all TableDescriptors, expected: " +
(count + 1) + " got: " + htds.getAll().size(),
@ -317,7 +321,8 @@ public class TestFSTableDescriptors {
final int count = 10;
// Write out table infos via non-cached FSTableDescriptors
for (int i = 0; i < count; i++) {
nonchtds.createTableDescriptor(TableDescriptorBuilder.newBuilder(TableName.valueOf(name + i)).build());
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
nonchtds.createTableDescriptor(htd);
}
// Calls to getAll() won't increase the cache counter, do per table.
@ -328,15 +333,15 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build();
HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1
assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
for (Map.Entry entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey();
TableDescriptor nchtd = entry.getValue();
HTableDescriptor nchtd = (HTableDescriptor) entry.getValue();
assertTrue("expected " + htd.toString() +
" got: " + chtds.get(TableName.valueOf(t)).toString(),
(nchtd.equals(chtds.get(TableName.valueOf(t)))));
@ -361,7 +366,7 @@ public class TestFSTableDescriptors {
// Cleanup old tests if any detrius laying around.
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
htds.add(htd);
htds.add(htd);
htds.add(htd);
@ -410,14 +415,12 @@ public class TestFSTableDescriptors {
@Test
public void testCreateTableDescriptorUpdatesIfExistsAlready() throws IOException {
Path testdir = UTIL.getDataTestDir(name.getMethodName());
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build();
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
assertTrue(fstd.createTableDescriptor(htd));
assertFalse(fstd.createTableDescriptor(htd));
htd = TableDescriptorBuilder.newBuilder(htd)
.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"))
.build();
htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
Path tableDir = fstd.getTableDir(htd.getTableName());
Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
@ -440,10 +443,10 @@ public class TestFSTableDescriptors {
}
@Override
public TableDescriptor get(TableName tablename)
public HTableDescriptor get(TableName tablename)
throws TableExistsException, FileNotFoundException, IOException {
LOG.info((super.isUsecache() ? "Cached" : "Non-Cached") +
" TableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
" HTableDescriptor.get() on " + tablename + ", cachehits=" + this.cachehits);
return super.get(tablename);
}
}
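
One more API pair worth noting from the TestFSTableDescriptors hunks: serialization moves back from the static helpers on TableDescriptorBuilder to the instance toByteArray and static parseFrom on HTableDescriptor. A minimal round-trip sketch under that assumption (class and table names are illustrative only):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.exceptions.DeserializationException;

    public class DescriptorSerdeSketch {

      // Builder-era round trip: static helpers on TableDescriptorBuilder.
      static TableDescriptor roundTripBuilder() throws DeserializationException, IOException {
        TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t")).build();
        byte[] bytes = TableDescriptorBuilder.toByteArray(htd);
        return TableDescriptorBuilder.parseFrom(bytes);
      }

      // Reverted round trip: instance method plus static parseFrom on HTableDescriptor.
      static HTableDescriptor roundTripHTableDescriptor() throws DeserializationException, IOException {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t"));
        byte[] bytes = htd.toByteArray();
        return HTableDescriptor.parseFrom(bytes);
      }
    }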