HBASE-23055 Alter hbase:meta (#1043)

Make hbase:meta region schema dynamic.

Patch has been under development a good while and its focus has changed
a few times, so it is bloated with fixups from older versions.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
M hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java
 Shut down access to internals and removed unused methods.

M hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/EnableTableProcedure.java
 Cleanup/refactor section on replica-handling.

M hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
 Get hbase:meta schema from filesystem rather than from hard-coding.
Michael Stack, 2020-01-21 13:17:27 -08:00 (committed by stack)
parent 00e64d83e8
commit ba3463d9de
29 changed files with 417 additions and 355 deletions
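
For illustration only (not part of this patch): making the schema dynamic means
hbase:meta can be altered through the same Admin API as a user table, e.g.
changing a column family attribute. 'admin' is an assumed
org.apache.hadoop.hbase.client.Admin instance; the encoding choice is hypothetical.

    // Hypothetical sketch: alter a column family attribute on hbase:meta.
    TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
    ColumnFamilyDescriptor info = meta.getColumnFamily(HConstants.CATALOG_FAMILY);
    admin.modifyTable(TableDescriptorBuilder.newBuilder(meta)
        .modifyColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(info)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .build());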


@@ -304,11 +304,18 @@ public class MetaTableAccessor {
    */
   public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo)
       throws IOException {
-    byte[] row = getMetaKeyForRegion(regionInfo);
-    Get get = new Get(row);
+    return getRegionLocation(getCatalogFamilyRow(connection, regionInfo),
+      regionInfo, regionInfo.getReplicaId());
+  }
+
+  /**
+   * @return Return the {@link HConstants#CATALOG_FAMILY} row from hbase:meta.
+   */
+  public static Result getCatalogFamilyRow(Connection connection, RegionInfo ri)
+      throws IOException {
+    Get get = new Get(getMetaKeyForRegion(ri));
     get.addFamily(HConstants.CATALOG_FAMILY);
-    Result r = get(getMetaHTable(connection), get);
-    return getRegionLocation(r, regionInfo, regionInfo.getReplicaId());
+    return get(getMetaHTable(connection), get);
   }
 
   /** Returns the row key to use for this regionInfo */
@@ -1109,6 +1116,7 @@ public class MetaTableAccessor {
   /**
    * Updates state in META
+   * Do not use. For internal use only.
    * @param conn connection to use
    * @param tableName table to look for
    */

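A minimal usage sketch of the new getCatalogFamilyRow helper above (assumed
caller context with an open 'connection' and a 'regionInfo'):

    // Fetch the raw 'info' family row for a region, then the derived location.
    Result catalogRow = MetaTableAccessor.getCatalogFamilyRow(connection, regionInfo);
    HRegionLocation location = MetaTableAccessor.getRegionLocation(connection, regionInfo);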

@@ -666,22 +666,35 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       new DisableTableProcedureBiConsumer(tableName));
   }
 
+  /**
+   * Utility for completing passed TableState {@link CompletableFuture} <code>future</code>
+   * using passed parameters. Sets error or boolean result ('true' if table matches
+   * the passed-in targetState).
+   */
+  private static CompletableFuture<Boolean> completeCheckTableState(
+      CompletableFuture<Boolean> future, TableState tableState, Throwable error,
+      TableState.State targetState, TableName tableName) {
+    if (error != null) {
+      future.completeExceptionally(error);
+    } else {
+      if (tableState != null) {
+        future.complete(tableState.inStates(targetState));
+      } else {
+        future.completeExceptionally(new TableNotFoundException(tableName));
+      }
+    }
+    return future;
+  }
+
   @Override
   public CompletableFuture<Boolean> isTableEnabled(TableName tableName) {
     if (TableName.isMetaTableName(tableName)) {
       return CompletableFuture.completedFuture(true);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.ENABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
+        TableState.State.ENABLED, tableName);
     });
     return future;
   }
@@ -692,16 +705,9 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
       return CompletableFuture.completedFuture(false);
     }
     CompletableFuture<Boolean> future = new CompletableFuture<>();
-    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (state, error) -> {
-      if (error != null) {
-        future.completeExceptionally(error);
-        return;
-      }
-      if (state.isPresent()) {
-        future.complete(state.get().inStates(TableState.State.DISABLED));
-      } else {
-        future.completeExceptionally(new TableNotFoundException(tableName));
-      }
+    addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> {
+      completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error,
+        TableState.State.DISABLED, tableName);
     });
     return future;
   }


@@ -196,7 +196,7 @@ class ZKAsyncRegistry implements AsyncRegistry {
     addListener(
       zk.list(znodePaths.baseZNode)
         .thenApply(children -> children.stream()
-          .filter(c -> c.startsWith(znodePaths.metaZNodePrefix)).collect(Collectors.toList())),
+          .filter(c -> this.znodePaths.isMetaZNodePrefix(c)).collect(Collectors.toList())),
       (metaReplicaZNodes, error) -> {
         if (error != null) {
           future.completeExceptionally(error);


@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -24,6 +24,7 @@ import static org.apache.hadoop.hbase.HConstants.SPLIT_LOGDIR_NAME;
 import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
 import static org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID;
+import java.util.Collection;
 import java.util.Optional;
 import java.util.stream.IntStream;
 import org.apache.hadoop.conf.Configuration;
@@ -40,15 +41,24 @@ public class ZNodePaths {
   // TODO: Replace this with ZooKeeper constant when ZOOKEEPER-277 is resolved.
   public static final char ZNODE_PATH_SEPARATOR = '/';
-  public final static String META_ZNODE_PREFIX = "meta-region-server";
+  private static final String META_ZNODE_PREFIX = "meta-region-server";
   private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
 
   // base znode for this cluster
   public final String baseZNode;
-  // the prefix of meta znode, does not include baseZNode.
-  public final String metaZNodePrefix;
-  // znodes containing the locations of the servers hosting the meta replicas
-  public final ImmutableMap<Integer, String> metaReplicaZNodes;
+
+  /**
+   * The prefix of meta znode. Does not include baseZNode.
+   * It's a 'prefix' because the meta replica id integer can be tagged on the end (if
+   * no number is present, it is the 'default' replica).
+   */
+  private final String metaZNodePrefix;
+
+  /**
+   * znodes containing the locations of the servers hosting the meta replicas
+   */
+  private final ImmutableMap<Integer, String> metaReplicaZNodes;
+
   // znode containing ephemeral nodes of the regionservers
   public final String rsZNode;
   // znode containing ephemeral nodes of the draining regionservers
@@ -154,21 +164,21 @@ public class ZNodePaths {
   }
 
   /**
-   * Is the znode of any meta replica
-   * @param node
-   * @return true or false
+   * @return true if the znode is a meta region replica
    */
   public boolean isAnyMetaReplicaZNode(String node) {
-    if (metaReplicaZNodes.containsValue(node)) {
-      return true;
-    }
-    return false;
+    return this.metaReplicaZNodes.containsValue(node);
   }
 
   /**
-   * Get the znode string corresponding to a replicaId
-   * @param replicaId
-   * @return znode
+   * @return Meta Replica ZNodes
+   */
+  public Collection<String> getMetaReplicaZNodes() {
+    return this.metaReplicaZNodes.values();
+  }
+
+  /**
+   * @return the znode string corresponding to a replicaId
    */
   public String getZNodeForReplica(int replicaId) {
     // return a newly created path but don't update the cache of paths
@@ -179,24 +189,21 @@ public class ZNodePaths {
   }
 
   /**
-   * Parse the meta replicaId from the passed znode
+   * Parse the meta replicaId from the passed znode name.
    * @param znode the name of the znode, does not include baseZNode
    * @return replicaId
    */
   public int getMetaReplicaIdFromZnode(String znode) {
-    if (znode.equals(metaZNodePrefix)) {
-      return RegionInfo.DEFAULT_REPLICA_ID;
-    }
-    return Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
+    return znode.equals(metaZNodePrefix)?
+      RegionInfo.DEFAULT_REPLICA_ID:
+      Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1));
   }
 
   /**
-   * Is it the default meta replica's znode
-   * @param znode the name of the znode, does not include baseZNode
-   * @return true or false
+   * @return True if meta znode.
    */
-  public boolean isDefaultMetaReplicaZnode(String znode) {
-    return metaReplicaZNodes.get(DEFAULT_REPLICA_ID).equals(znode);
+  public boolean isMetaZNodePrefix(String znode) {
+    return znode != null && znode.startsWith(this.metaZNodePrefix);
   }
 
   /**

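A quick sketch of the naming convention the javadoc above describes (a
hypothetical 'znodePaths' instance is assumed):

    znodePaths.getMetaReplicaIdFromZnode("meta-region-server");    // 0 (DEFAULT_REPLICA_ID)
    znodePaths.getMetaReplicaIdFromZnode("meta-region-server-2");  // 2
    znodePaths.isMetaZNodePrefix("meta-region-server-1");          // true
    znodePaths.isMetaZNodePrefix("draining");                      // false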

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -26,7 +26,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
-import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -1189,12 +1188,6 @@ public final class HConstants {
     HBCK_SIDELINEDIR_NAME, HBASE_TEMP_DIRECTORY, MIGRATION_NAME
   }));
 
-  /** Directories that are not HBase user table directories */
-  public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
-
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
   public static final String HEALTH_SCRIPT_TIMEOUT = "hbase.node.health.script.timeout";


@@ -25,25 +25,19 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 /**
  * Get, remove and modify table descriptors.
+ * Used by servers to host descriptors.
  */
 @InterfaceAudience.Private
 public interface TableDescriptors {
   /**
-   * @param tableName
    * @return TableDescriptor for tablename
-   * @throws IOException
    */
-  TableDescriptor get(final TableName tableName)
-  throws IOException;
+  TableDescriptor get(final TableName tableName) throws IOException;
 
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
-   * @throws IOException
    */
-  Map<String, TableDescriptor> getByNamespace(String name)
-  throws IOException;
+  Map<String, TableDescriptor> getByNamespace(String name) throws IOException;
 
   /**
    * Get Map of all TableDescriptors. Populates the descriptor cache as a
@@ -51,25 +45,19 @@ public interface TableDescriptors {
    * Notice: the key of map is the table name which contains namespace. It was generated by
    * {@link TableName#getNameWithNamespaceInclAsString()}.
    * @return Map of all descriptors.
-   * @throws IOException
    */
   Map<String, TableDescriptor> getAll() throws IOException;
 
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
-   * @throws IOException
    */
-  void add(final TableDescriptor htd)
-  throws IOException;
+  void update(final TableDescriptor htd) throws IOException;
 
   /**
-   * @param tablename
    * @return Instance of table descriptor or null if none found.
-   * @throws IOException
    */
-  TableDescriptor remove(final TableName tablename)
-  throws IOException;
+  TableDescriptor remove(final TableName tablename) throws IOException;
 
   /**
    * Enables the tabledescriptor cache

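A usage sketch of the renamed method (hypothetical table name; 'tds' is an
assumed TableDescriptors instance): update covers both first-time add and
overwrite, which is why the old add name was dropped.

    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).build();
    tds.update(htd);  // add or overwrite
    TableDescriptor fetched = tds.get(TableName.valueOf("t1"));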

@@ -999,7 +999,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     RegionState rs = this.assignmentManager.getRegionStates().
       getRegionState(RegionInfoBuilder.FIRST_META_REGIONINFO);
     LOG.info("hbase:meta {}", rs);
-    if (rs.isOffline()) {
+    if (rs != null && rs.isOffline()) {
       Optional<InitMetaProcedure> optProc = procedureExecutor.getProcedures().stream()
         .filter(p -> p instanceof InitMetaProcedure).map(o -> (InitMetaProcedure) o).findAny();
       initMetaProc = optProc.orElseGet(() -> {


@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -69,7 +69,7 @@ public class TableStateManager {
   private final ConcurrentMap<TableName, TableState.State> tableName2State =
     new ConcurrentHashMap<>();
 
-  public TableStateManager(MasterServices master) {
+  TableStateManager(MasterServices master) {
     this.master = master;
   }
@@ -88,61 +88,6 @@ public class TableStateManager {
     }
   }
 
-  /**
-   * Set table state to provided but only if table in specified states Caller should lock table on
-   * write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   * @return null if succeed or table state if failed
-   */
-  public TableState setTableStateIfInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return null;
-      } else {
-        return currentState;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
-  /**
-   * Set table state to provided but only if table not in specified states Caller should lock table
-   * on write.
-   * @param tableName table to change state for
-   * @param newState new state
-   * @param states states to check against
-   */
-  public boolean setTableStateIfNotInStates(TableName tableName, TableState.State newState,
-      TableState.State... states) throws IOException {
-    ReadWriteLock lock = tnLock.getLock(tableName);
-    lock.writeLock().lock();
-    try {
-      TableState currentState = readMetaState(tableName);
-      if (currentState == null) {
-        throw new TableNotFoundException(tableName);
-      }
-      if (!currentState.inStates(states)) {
-        updateMetaState(tableName, newState);
-        return true;
-      } else {
-        return false;
-      }
-    } finally {
-      lock.writeLock().unlock();
-    }
-  }
-
   public boolean isTableState(TableName tableName, TableState.State... states) {
     try {
       TableState tableState = getTableState(tableName);
@@ -156,6 +101,7 @@ public class TableStateManager {
   public void setDeletedTable(TableName tableName) throws IOException {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
+      // Can't delete the hbase:meta table.
       return;
     }
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -184,7 +130,7 @@ public class TableStateManager {
    * @param states filter by states
    * @return tables in given states
    */
-  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
+  Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
     // Only be called in region normalizer, will not use cache.
     final Set<TableName> rv = Sets.newHashSet();
     MetaTableAccessor.fullScanTables(master.getConnection(), new MetaTableAccessor.Visitor() {
@@ -200,12 +146,6 @@ public class TableStateManager {
     return rv;
   }
 
-  public static class TableStateNotFoundException extends TableNotFoundException {
-    TableStateNotFoundException(TableName tableName) {
-      super(tableName.getNameAsString());
-    }
-  }
-
   @NonNull
   public TableState getTableState(TableName tableName) throws IOException {
     ReadWriteLock lock = tnLock.getLock(tableName);
@@ -213,7 +153,7 @@ public class TableStateManager {
     try {
       TableState currentState = readMetaState(tableName);
       if (currentState == null) {
-        throw new TableStateNotFoundException(tableName);
+        throw new TableNotFoundException("No state found for " + tableName);
       }
       return currentState;
     } finally {
@@ -225,7 +165,7 @@ public class TableStateManager {
     if (tableName.equals(TableName.META_TABLE_NAME)) {
       if (TableState.State.DISABLING.equals(newState) ||
           TableState.State.DISABLED.equals(newState)) {
-        throw new IllegalArgumentIOException("Cannot disable the meta table; " + newState);
+        throw new IllegalArgumentIOException("Cannot disable meta table; " + newState);
       }
       // Otherwise, just return; no need to set ENABLED on meta -- it is always ENABLED.
       return;
@@ -237,7 +177,7 @@ public class TableStateManager {
       succ = true;
     } finally {
       if (!succ) {
-        tableName2State.remove(tableName);
+        this.tableName2State.remove(tableName);
       }
     }
     metaStateUpdated(tableName, newState);
@@ -264,10 +204,8 @@ public class TableStateManager {
   }
 
   public void start() throws IOException {
-    TableDescriptors tableDescriptors = master.getTableDescriptors();
     migrateZooKeeper();
-    Connection connection = master.getConnection();
-    fixTableStates(tableDescriptors, connection);
+    fixTableStates(master.getTableDescriptors(), master.getConnection());
   }
 
   private void fixTableStates(TableDescriptors tableDescriptors, Connection connection)
@@ -336,7 +274,7 @@ public class TableStateManager {
         TableState ts = null;
         try {
           ts = getTableState(entry.getKey());
-        } catch (TableStateNotFoundException e) {
+        } catch (TableNotFoundException e) {
           // This can happen; table exists but no TableState.
         }
         if (ts == null) {

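With the nested TableStateNotFoundException gone, the caller-side pattern
changes; an illustrative sketch (not verbatim from the patch):

    try {
      TableState state = tableStateManager.getTableState(tableName);
      // ... act on state ...
    } catch (TableNotFoundException e) {
      // Table, or its state row in hbase:meta, is absent.
    }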

@@ -146,8 +146,7 @@ public class RegionStateStore {
     }
   }
 
-  public void updateRegionLocation(RegionStateNode regionStateNode)
-      throws IOException {
+  void updateRegionLocation(RegionStateNode regionStateNode) throws IOException {
     if (regionStateNode.getRegionInfo().isMetaRegion()) {
       updateMetaLocation(regionStateNode.getRegionInfo(), regionStateNode.getRegionLocation(),
         regionStateNode.getState());


@@ -78,9 +78,7 @@ public class CreateTableProcedure
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateTableState state)
       throws InterruptedException {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
+    LOG.info("{} execute state={}", this, state);
     try {
       switch (state) {
         case CREATE_TABLE_PRE_OPERATION:
@@ -320,8 +318,7 @@ public class CreateTableProcedure
     // using a copy of descriptor, table will be created enabling first
     final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
     ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
-      .createTableDescriptorForTableDirectory(
-        tempTableDir, tableDescriptor, false);
+      .createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);
 
     // 2. Create Regions
     newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,


@@ -243,7 +243,8 @@ public class DisableTableProcedure
   private boolean prepareDisable(final MasterProcedureEnv env) throws IOException {
     boolean canTableBeDisabled = true;
     if (tableName.equals(TableName.META_TABLE_NAME)) {
-      setFailure("master-disable-table", new ConstraintException("Cannot disable catalog table"));
+      setFailure("master-disable-table",
+        new ConstraintException("Cannot disable " + this.tableName));
       canTableBeDisabled = false;
     } else if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
       setFailure("master-disable-table", new TableNotFoundException(tableName));


@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -27,11 +27,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -99,66 +97,55 @@ public class EnableTableProcedure
         setNextState(EnableTableState.ENABLE_TABLE_MARK_REGIONS_ONLINE);
         break;
       case ENABLE_TABLE_MARK_REGIONS_ONLINE:
+        // Get the region replica count. If changed since disable, need to do
+        // more work assigning.
         Connection connection = env.getMasterServices().getConnection();
-        // we will need to get the tableDescriptor here to see if there is a change in the replica
-        // count
-        TableDescriptor hTableDescriptor =
+        TableDescriptor tableDescriptor =
           env.getMasterServices().getTableDescriptors().get(tableName);
+        int configuredReplicaCount = tableDescriptor.getRegionReplication();
-        // Get the replica count
-        int regionReplicaCount = hTableDescriptor.getRegionReplication();
-        // Get the regions for the table from memory; get both online and offline regions
-        // ('true').
+        // Get regions for the table from memory; get both online and offline regions ('true').
         List<RegionInfo> regionsOfTable =
           env.getAssignmentManager().getRegionStates().getRegionsOfTable(tableName, true);
-        int currentMaxReplica = 0;
-        // Check if the regions in memory have replica regions as marked in META table
-        for (RegionInfo regionInfo : regionsOfTable) {
-          if (regionInfo.getReplicaId() > currentMaxReplica) {
-            // Iterating through all the list to identify the highest replicaID region.
-            // We can stop after checking with the first set of regions??
-            currentMaxReplica = regionInfo.getReplicaId();
-          }
-        }
+        // How many replicas do we currently have? Check regions returned from
+        // in-memory state.
+        int currentMaxReplica = getMaxReplicaId(regionsOfTable);
 
-        // read the META table to know the actual number of replicas for the table - if there
-        // was a table modification on region replica then this will reflect the new entries also
-        int replicasFound =
-          getNumberOfReplicasFromMeta(connection, regionReplicaCount, regionsOfTable);
-        assert regionReplicaCount - 1 == replicasFound;
-        LOG.info(replicasFound + " META entries added for the given regionReplicaCount "
-          + regionReplicaCount + " for the table " + tableName.getNameAsString());
-        if (currentMaxReplica == (regionReplicaCount - 1)) {
+        // Read the META table to know the number of replicas the table currently has.
+        // If there was a table modification on region replica count then need to
+        // adjust replica counts here.
+        int replicasFound = TableName.isMetaTableName(this.tableName)?
+          0: // TODO: Figure better what to do here for hbase:meta replica.
+          getReplicaCountInMeta(connection, configuredReplicaCount, regionsOfTable);
+        LOG.info("replicasFound={} (configuredReplicaCount={}) for {}", replicasFound,
+          configuredReplicaCount, tableName.getNameAsString());
+        if (currentMaxReplica == (configuredReplicaCount - 1)) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug("There is no change to the number of region replicas."
-              + " Assigning the available regions." + " Current and previous"
-              + "replica count is " + regionReplicaCount);
+            LOG.debug("No change in number of region replicas (configuredReplicaCount={});"
+              + " assigning.", configuredReplicaCount);
           }
-        } else if (currentMaxReplica > (regionReplicaCount - 1)) {
-          // we have additional regions as the replica count has been decreased. Delete
+        } else if (currentMaxReplica > (configuredReplicaCount - 1)) {
+          // We have additional regions as the replica count has been decreased. Delete
           // those regions because already the table is in the unassigned state
           LOG.info("The number of replicas " + (currentMaxReplica + 1)
-            + " is more than the region replica count " + regionReplicaCount);
+            + " is more than the region replica count " + configuredReplicaCount);
           List<RegionInfo> copyOfRegions = new ArrayList<RegionInfo>(regionsOfTable);
           for (RegionInfo regionInfo : copyOfRegions) {
-            if (regionInfo.getReplicaId() > (regionReplicaCount - 1)) {
+            if (regionInfo.getReplicaId() > (configuredReplicaCount - 1)) {
               // delete the region from the regionStates
               env.getAssignmentManager().getRegionStates().deleteRegion(regionInfo);
               // remove it from the list of regions of the table
-              LOG.info("The regioninfo being removed is " + regionInfo + " "
-                + regionInfo.getReplicaId());
+              LOG.info("Removed replica={} of {}", regionInfo.getRegionId(), regionInfo);
               regionsOfTable.remove(regionInfo);
             }
           }
         } else {
           // the replicasFound is less than the regionReplication
-          LOG.info("The number of replicas has been changed(increased)."
-            + " Lets assign the new region replicas. The previous replica count was "
-            + (currentMaxReplica + 1) + ". The current replica count is " + regionReplicaCount);
-          regionsOfTable = RegionReplicaUtil.addReplicas(hTableDescriptor, regionsOfTable,
-            currentMaxReplica + 1, regionReplicaCount);
+          LOG.info("Number of replicas has increased. Assigning new region replicas. " +
+            "The previous replica count was {}. The current replica count is {}.",
            (currentMaxReplica + 1), configuredReplicaCount);
+          regionsOfTable = RegionReplicaUtil.addReplicas(tableDescriptor, regionsOfTable,
+            currentMaxReplica + 1, configuredReplicaCount);
         }
         // Assign all the table regions. (including region replicas if added).
         // createAssignProcedure will try to retain old assignments if possible.
@@ -186,9 +173,13 @@ public class EnableTableProcedure
     return Flow.HAS_MORE_STATE;
   }
 
-  private int getNumberOfReplicasFromMeta(Connection connection, int regionReplicaCount,
+  /**
+   * @return Count of replicas found reading hbase:meta Region row or zk if
+   *   asking about the hbase:meta table itself.
+   */
+  private int getReplicaCountInMeta(Connection connection, int regionReplicaCount,
       List<RegionInfo> regionsOfTable) throws IOException {
-    Result r = getRegionFromMeta(connection, regionsOfTable);
+    Result r = MetaTableAccessor.getCatalogFamilyRow(connection, regionsOfTable.get(0));
     int replicasFound = 0;
     for (int i = 1; i < regionReplicaCount; i++) {
       // Since we have already added the entries to the META we will be getting only that here
@@ -201,16 +192,6 @@ public class EnableTableProcedure
     return replicasFound;
   }
 
-  private Result getRegionFromMeta(Connection connection, List<RegionInfo> regionsOfTable)
-      throws IOException {
-    byte[] metaKeyForRegion = MetaTableAccessor.getMetaKeyForRegion(regionsOfTable.get(0));
-    Get get = new Get(metaKeyForRegion);
-    get.addFamily(HConstants.CATALOG_FAMILY);
-    Table metaTable = MetaTableAccessor.getMetaHTable(connection);
-    Result r = metaTable.get(get);
-    return r;
-  }
-
   @Override
   protected void rollbackState(final MasterProcedureEnv env, final EnableTableState state)
       throws IOException {
@@ -408,4 +389,20 @@ public class EnableTableProcedure
       }
     }
   }
+
+  /**
+   * @return Maximum region replica id found in passed list of regions.
+   */
+  private static int getMaxReplicaId(List<RegionInfo> regions) {
+    int max = 0;
+    for (RegionInfo regionInfo: regions) {
+      if (regionInfo.getReplicaId() > max) {
+        // Iterating through all the list to identify the highest replicaID region.
+        // We can stop after checking with the first set of regions??
+        max = regionInfo.getReplicaId();
+      }
+    }
+    return max;
+  }
 }

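The three branches above reconcile the in-memory replica count against the
configured one; a self-contained sketch of the decision (names are illustrative,
not the procedure's API):

    static String reconcile(int currentMaxReplica, int configuredReplicaCount) {
      if (currentMaxReplica == configuredReplicaCount - 1) {
        return "no change; assign regions as-is";
      } else if (currentMaxReplica > configuredReplicaCount - 1) {
        return "replica count lowered; delete excess replica regions";
      } else {
        return "replica count raised; add new replica RegionInfos before assign";
      }
    }
    // e.g. reconcile(2, 2) -> "replica count lowered; ..." (replica ids 0..2 in
    // memory but only 2 replicas configured).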

@@ -19,6 +19,8 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
@@ -59,10 +62,19 @@ public class ModifyTableProcedure
   private TableDescriptor modifiedTableDescriptor;
   private boolean deleteColumnFamilyInModify;
   private boolean shouldCheckDescriptor;
 
+  /**
+   * List of column families that cannot be deleted from the hbase:meta table.
+   * They are critical to cluster operation. This is a bit of an odd place to
+   * keep this list but then this is the tooling that does add/remove. Keeping
+   * it local!
+   */
+  private static final List<byte []> UNDELETABLE_META_COLUMNFAMILIES =
+    Collections.unmodifiableList(Arrays.asList(
+      HConstants.CATALOG_FAMILY, HConstants.TABLE_FAMILY, HConstants.REPLICATION_BARRIER_FAMILY));
+
   public ModifyTableProcedure() {
     super();
-    initilize(null, false);
+    initialize(null, false);
   }
 
   public ModifyTableProcedure(final MasterProcedureEnv env, final TableDescriptor htd)
@@ -81,12 +93,28 @@ public class ModifyTableProcedure
       final TableDescriptor oldTableDescriptor, final boolean shouldCheckDescriptor)
       throws HBaseIOException {
     super(env, latch);
-    initilize(oldTableDescriptor, shouldCheckDescriptor);
+    initialize(oldTableDescriptor, shouldCheckDescriptor);
     this.modifiedTableDescriptor = newTableDescriptor;
     preflightChecks(env, null/*No table checks; if changing peers, table can be online*/);
   }
 
-  private void initilize(final TableDescriptor unmodifiedTableDescriptor,
+  @Override
+  protected void preflightChecks(MasterProcedureEnv env, Boolean enabled) throws HBaseIOException {
+    super.preflightChecks(env, enabled);
+    if (this.modifiedTableDescriptor.isMetaTable()) {
+      // If we are modifying the hbase:meta table, make sure we are not deleting critical
+      // column families else we'll damage the cluster.
+      Set<byte []> cfs = this.modifiedTableDescriptor.getColumnFamilyNames();
+      for (byte[] family : UNDELETABLE_META_COLUMNFAMILIES) {
+        if (!cfs.contains(family)) {
+          throw new HBaseIOException("Delete of hbase:meta column family " +
+            Bytes.toString(family));
+        }
+      }
+    }
+  }
+
+  private void initialize(final TableDescriptor unmodifiedTableDescriptor,
       final boolean shouldCheckDescriptor) {
     this.unmodifiedTableDescriptor = unmodifiedTableDescriptor;
     this.shouldCheckDescriptor = shouldCheckDescriptor;
@@ -284,17 +312,27 @@ public class ModifyTableProcedure
         "REGION_REPLICATION change is not supported for enabled tables");
       }
     }
+    this.deleteColumnFamilyInModify = isDeleteColumnFamily(unmodifiedTableDescriptor,
+      modifiedTableDescriptor);
+  }
 
-    // Find out whether all column families in unmodifiedTableDescriptor also exists in
-    // the modifiedTableDescriptor. This is to determine whether we are safe to rollback.
-    final Set<byte[]> oldFamilies = unmodifiedTableDescriptor.getColumnFamilyNames();
-    final Set<byte[]> newFamilies = modifiedTableDescriptor.getColumnFamilyNames();
-    for (byte[] familyName : oldFamilies) {
+  /**
+   * Find out whether all column families in unmodifiedTableDescriptor also exist in
+   * the modifiedTableDescriptor.
+   * @return True if we are deleting a column family.
+   */
+  private static boolean isDeleteColumnFamily(TableDescriptor originalDescriptor,
+      TableDescriptor newDescriptor) {
+    boolean result = false;
+    final Set<byte[]> originalFamilies = originalDescriptor.getColumnFamilyNames();
+    final Set<byte[]> newFamilies = newDescriptor.getColumnFamilyNames();
+    for (byte[] familyName : originalFamilies) {
       if (!newFamilies.contains(familyName)) {
-        this.deleteColumnFamilyInModify = true;
+        result = true;
         break;
       }
     }
+    return result;
   }
 
   /**
@@ -315,7 +353,7 @@ public class ModifyTableProcedure
    * @throws IOException
    **/
   private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
-    env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
+    env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
   }
 
   /**

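A hypothetical example of the new guard in action ('admin' is an assumed Admin
instance): removing a critical family from the hbase:meta descriptor now fails
the preflight check instead of damaging the cluster.

    TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
    admin.modifyTable(TableDescriptorBuilder.newBuilder(meta)
        .removeColumnFamily(HConstants.CATALOG_FAMILY)  // the 'info' family
        .build());
    // Expected: HBaseIOException("Delete of hbase:meta column family info")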

@@ -368,7 +368,7 @@ public class RestoreSnapshotProcedure
    * @throws IOException
    **/
   private void updateTableDescriptor(final MasterProcedureEnv env) throws IOException {
-    env.getMasterServices().getTableDescriptors().add(modifiedTableDescriptor);
+    env.getMasterServices().getTableDescriptors().update(modifiedTableDescriptor);
   }
 
   /**


@@ -23,10 +23,10 @@ import java.util.HashMap;
 import java.util.Map;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
@@ -139,7 +139,7 @@ public abstract class AbstractPeerProcedure<TState> extends AbstractPeerNoLockPr
         return true;
       }
       Thread.sleep(SLEEP_INTERVAL_MS);
-    } catch (TableStateNotFoundException e) {
+    } catch (TableNotFoundException e) {
       return false;
     } catch (InterruptedException e) {
       throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);


@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,10 +20,10 @@ package org.apache.hadoop.hbase.master.replication;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.ReopenTableRegionsProcedure;
@@ -124,7 +124,7 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
         return false;
       }
       Thread.sleep(SLEEP_INTERVAL_MS);
-    } catch (TableStateNotFoundException e) {
+    } catch (TableNotFoundException e) {
       return false;
     } catch (InterruptedException e) {
       throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);


@@ -1,4 +1,4 @@
-/**
+/*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -41,6 +41,6 @@ public class MetaLocationSyncer extends ClientZKSyncer {
   @Override
   Collection<String> getNodesToWatch() {
-    return watcher.getZNodePaths().metaReplicaZNodes.values();
+    return watcher.getZNodePaths().getMetaReplicaZNodes();
   }
 }


@@ -472,11 +472,10 @@ public final class SnapshotManifest {
   public void consolidate() throws IOException {
     if (getSnapshotFormat(desc) == SnapshotManifestV1.DESCRIPTOR_VERSION) {
-      Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(conf, workingDirFs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+      FSTableDescriptors.createTableDescriptorForTableDirectory(workingDirFs, workingDir, htd,
+        false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
       convertToV2SingleManifest();


@@ -122,6 +122,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @param fsreadonly True if we are read-only when it comes to filesystem
    * operations; i.e. on remove, we do not do delete in fs.
    */
+  @VisibleForTesting
   public FSTableDescriptors(final Configuration conf, final FileSystem fs,
       final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException {
     this(conf, fs, rootdir, fsreadonly, usecache, null);
@@ -141,12 +142,27 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = metaObserver == null ? createMetaTableDescriptor(conf)
-      : metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+    TableDescriptor td = null;
+    try {
+      td = getTableDescriptorFromFs(fs, rootdir, TableName.META_TABLE_NAME);
+    } catch (TableInfoMissingException e) {
+      td = metaObserver == null? createMetaTableDescriptor(conf):
+        metaObserver.apply(createMetaTableDescriptorBuilder(conf)).build();
+      if (!fsreadonly) {
+        LOG.info("Creating new hbase:meta table default descriptor/schema {}", td);
+        updateTableDescriptor(td);
+      }
+    }
+    this.metaTableDescriptor = td;
   }
 
+  /**
+   * Make this private as soon as we've undone test dependency.
+   */
   @VisibleForTesting
-  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf) throws IOException {
+  public static TableDescriptorBuilder createMetaTableDescriptorBuilder(final Configuration conf)
+      throws IOException {
     // TODO We used to set CacheDataInL1 for META table. When we have BucketCache in file mode, now
     // the META table data goes to File mode BC only. Test how that affect the system. If too much,
     // we have to rethink about adding back the setCacheDataInL1 for META table CFs.
@@ -228,16 +244,6 @@ public class FSTableDescriptors implements TableDescriptors {
   public TableDescriptor get(final TableName tablename)
       throws IOException {
     invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return metaTableDescriptor;
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
-    // .logs, .oldlogs or .corrupt throw an exception.
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
-      throw new IOException("No descriptor found for non table = " + tablename);
-    }
-
     if (usecache) {
       // Look in cache of descriptors.
       TableDescriptor cachedtdm = this.cache.get(tablename);
@@ -332,19 +338,10 @@ public class FSTableDescriptors implements TableDescriptors {
    * and updates the local cache with it.
    */
   @Override
-  public void add(TableDescriptor htd) throws IOException {
+  public void update(TableDescriptor htd) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    TableName tableName = htd.getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException(HConstants.NOT_IMPLEMENTED);
-    }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
-      throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: "
-        + htd.getTableName().getNameAsString());
-    }
     updateTableDescriptor(htd);
   }
 
@@ -369,26 +366,6 @@ public class FSTableDescriptors implements TableDescriptors {
     return descriptor;
   }
 
-  /**
-   * Checks if a current table info file exists for the given table
-   *
-   * @param tableName name of table
-   * @return true if exists
-   * @throws IOException
-   */
-  public boolean isTableInfoExists(TableName tableName) throws IOException {
-    return getTableInfoPath(tableName) != null;
-  }
-
-  /**
-   * Find the most current table info file for the given table in the hbase root directory.
-   * @return The file status of the current table info file or null if it does not exist
-   */
-  private FileStatus getTableInfoPath(final TableName tableName) throws IOException {
-    Path tableDir = getTableDir(tableName);
-    return getTableInfoPath(tableDir);
-  }
-
   private FileStatus getTableInfoPath(Path tableDir)
       throws IOException {
     return getTableInfoPath(fs, tableDir, !fsreadonly);
@@ -403,7 +380,6 @@ public class FSTableDescriptors implements TableDescriptors {
    * were sequence numbers).
    *
    * @return The file status of the current table info file or null if it does not exist
-   * @throws IOException
    */
   public static FileStatus getTableInfoPath(FileSystem fs, Path tableDir)
       throws IOException {
@@ -421,7 +397,6 @@ public class FSTableDescriptors implements TableDescriptors {
    * older files.
    *
    * @return The file status of the current table info file or null if none exist
-   * @throws IOException
    */
   private static FileStatus getTableInfoPath(FileSystem fs, Path tableDir, boolean removeOldFiles)
       throws IOException {
@@ -609,21 +584,6 @@ public class FSTableDescriptors implements TableDescriptors {
     return p;
   }
 
-  /**
-   * Deletes all the table descriptor files from the file system.
-   * Used in unit tests only.
-   * @throws NotImplementedException if in read only mode
-   */
-  public void deleteTableDescriptorIfExists(TableName tableName) throws IOException {
-    if (fsreadonly) {
-      throw new NotImplementedException("Cannot delete a table descriptor - in read only mode");
-    }
-    Path tableDir = getTableDir(tableName);
-    Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR);
-    deleteTableDescriptorFiles(fs, tableInfoDir, Integer.MAX_VALUE);
-  }
-
   /**
    * Deletes files matching the table info file pattern within the given directory
    * whose sequenceId is at most the given max sequenceId.
@@ -746,7 +706,8 @@ public class FSTableDescriptors implements TableDescriptors {
   /**
    * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
-   * a new table or snapshot a table.
+   * a new table during cluster start or in Clone and Create Table Procedures. Checks readOnly flag
+   * passed on construction.
    * @param tableDir table directory under which we should write the file
    * @param htd description of the table to write
    * @param forceCreation if <tt>true</tt>, then even if previous table descriptor is present it will
@@ -755,11 +716,28 @@ public class FSTableDescriptors implements TableDescriptors {
    * already exists and we weren't forcing the descriptor creation.
    * @throws IOException if a filesystem error occurs
    */
-  public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      TableDescriptor htd, boolean forceCreation) throws IOException {
-    if (fsreadonly) {
+  public boolean createTableDescriptorForTableDirectory(Path tableDir, TableDescriptor htd,
+      boolean forceCreation) throws IOException {
+    if (this.fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
+    return createTableDescriptorForTableDirectory(this.fs, tableDir, htd, forceCreation);
+  }
+
+  /**
+   * Create a new TableDescriptor in HDFS in the specified table directory. Happens when we create
+   * a new table or when snapshotting. Does not enforce read-only. That is for caller to determine.
+   * @param fs Filesystem to use.
+   * @param tableDir table directory under which we should write the file
+   * @param htd description of the table to write
+   * @param forceCreation if <tt>true</tt>, then even if a previous table descriptor is present it
+   *   will be overwritten
+   * @return <tt>true</tt> if we successfully created the file, <tt>false</tt> if the file
+   *   already exists and we weren't forcing the descriptor creation.
+   * @throws IOException if a filesystem error occurs
+   */
+  public static boolean createTableDescriptorForTableDirectory(FileSystem fs,
+      Path tableDir, TableDescriptor htd, boolean forceCreation) throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir);
     if (status != null) {
       LOG.debug("Current path=" + status.getPath());
@@ -772,9 +750,7 @@ public class FSTableDescriptors implements TableDescriptors {
         }
       }
     }
-    Path p = writeTableDescriptor(fs, htd, tableDir, status);
-    return p != null;
+    return writeTableDescriptor(fs, htd, tableDir, status) != null;
   }
 }

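A sketch of the new bootstrap behavior, under stated assumptions ('conf', 'fs'
and 'rootDir' set up elsewhere): the hbase:meta descriptor is read from the
filesystem when present, so an altered meta schema survives restarts; only when
the table-info file is missing is the built-in default written out.

    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir, false, true);
    // From fs if a tableinfo file exists; otherwise the default schema, which is
    // also persisted to fs since we are not read-only.
    TableDescriptor meta = fstd.get(TableName.META_TABLE_NAME);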

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -492,17 +492,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     return new Path(fs.getWorkingDirectory(), "test-data");
   }
 
-  /**
-   * @return META table descriptor
-   */
-  public TableDescriptorBuilder getMetaTableDescriptorBuilder() {
-    try {
-      return FSTableDescriptors.createMetaTableDescriptorBuilder(conf);
-    } catch (IOException e) {
-      throw new RuntimeException("Unable to create META table descriptor", e);
-    }
-  }
-
   /**
    * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
    * to write temporary test data. Call this method after setting up the mini dfs cluster

View File

@ -67,7 +67,7 @@ public class TestFSTableDescriptorForceCreation {
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
fstd.add(htd); fstd.update(htd);
assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false)); assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
} }

View File

@ -0,0 +1,119 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
/**
* Test being able to edit hbase:meta.
*/
@Category({MiscTests.class, LargeTests.class})
public class TestHBaseMetaEdit {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestHBaseMetaEdit.class);
@Rule
public TestName name = new TestName();
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
@Before
public void before() throws Exception {
UTIL.startMiniCluster();
}
@After
public void after() throws Exception {
UTIL.shutdownMiniCluster();
}
/**
* Set versions, set HBASE-16213 indexed block encoding, and add a column family.
* Delete the column family. Then try to delete a core hbase:meta family (should fail).
* Verify they are all in place by looking at TableDescriptor AND by checking
* what the RegionServer sees after opening Region.
*/
@Test
public void testEditMeta() throws IOException {
Admin admin = UTIL.getAdmin();
assertTrue(admin.tableExists(TableName.META_TABLE_NAME));
TableDescriptor originalDescriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
ColumnFamilyDescriptor cfd = originalDescriptor.getColumnFamily(HConstants.CATALOG_FAMILY);
int oldVersions = cfd.getMaxVersions();
// Add '1' to current versions count. Set encoding too.
cfd = ColumnFamilyDescriptorBuilder.newBuilder(cfd).setMaxVersions(oldVersions + 1).
setConfiguration(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING,
DataBlockEncoding.ROW_INDEX_V1.toString()).build();
admin.modifyColumnFamily(TableName.META_TABLE_NAME, cfd);
byte [] extraColumnFamilyName = Bytes.toBytes("xtra");
ColumnFamilyDescriptor newCfd =
ColumnFamilyDescriptorBuilder.newBuilder(extraColumnFamilyName).build();
admin.addColumnFamily(TableName.META_TABLE_NAME, newCfd);
TableDescriptor descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
// Assert the new max versions is the old count plus 1.
assertEquals(oldVersions + 1,
descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
assertTrue(descriptor.getColumnFamily(newCfd.getName()) != null);
String encoding = descriptor.getColumnFamily(HConstants.CATALOG_FAMILY).getConfiguration().
get(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
Region r = UTIL.getHBaseCluster().getRegionServer(0).
getRegion(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedName());
assertEquals(oldVersions + 1,
r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().getMaxVersions());
encoding = r.getStore(HConstants.CATALOG_FAMILY).getColumnFamilyDescriptor().
getConfigurationValue(ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING);
assertEquals(DataBlockEncoding.ROW_INDEX_V1.toString(), encoding);
assertTrue(r.getStore(extraColumnFamilyName) != null);
// Assert we can't drop critical hbase:meta column family but we can drop any other.
admin.deleteColumnFamily(TableName.META_TABLE_NAME, newCfd.getName());
descriptor = admin.getDescriptor(TableName.META_TABLE_NAME);
assertTrue(descriptor.getColumnFamily(newCfd.getName()) == null);
try {
admin.deleteColumnFamily(TableName.META_TABLE_NAME, HConstants.CATALOG_FAMILY);
fail("Should not reach here");
} catch (HBaseIOException hioe) {
assertTrue(hioe.getMessage().contains("Delete of hbase:meta"));
}
}
}
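Condensed from the test above: with this patch, hbase:meta's schema can be altered through the ordinary Admin API. A minimal sketch, assuming a running cluster and an Admin handle; the class and method names are illustrative only:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

public final class AlterMetaSketch {
  // Bump max versions on hbase:meta's catalog family by one.
  static void bumpMetaVersions(Admin admin) throws Exception {
    ColumnFamilyDescriptor cfd = admin.getDescriptor(TableName.META_TABLE_NAME)
        .getColumnFamily(HConstants.CATALOG_FAMILY);
    admin.modifyColumnFamily(TableName.META_TABLE_NAME,
        ColumnFamilyDescriptorBuilder.newBuilder(cfd)
            .setMaxVersions(cfd.getMaxVersions() + 1).build());
    // Core families stay protected: deleting HConstants.CATALOG_FAMILY
    // still fails with an HBaseIOException, as the test asserts.
  }
}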

View File

@ -327,7 +327,7 @@ public class MockMasterServices extends MockNoopMasterServices {
} }
@Override @Override
public void add(TableDescriptor htd) throws IOException { public void update(TableDescriptor htd) throws IOException {
// noop // noop
} }

View File

@ -36,17 +36,20 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL;
import org.junit.ClassRule; import org.junit.ClassRule;
import org.junit.Rule; import org.junit.Rule;
@ -88,11 +91,11 @@ public class TestGetClosestAtOrBefore {
FileSystem filesystem = FileSystem.get(conf); FileSystem filesystem = FileSystem.get(conf);
Path rootdir = UTIL.getDataTestDirOnTestFS(); Path rootdir = UTIL.getDataTestDirOnTestFS();
// Up flush size else we bind up when we use default catalog flush of 16k. // Up flush size else we bind up when we use default catalog flush of 16k.
TableDescriptorBuilder metaBuilder = UTIL.getMetaTableDescriptorBuilder() TableDescriptors tds = new FSTableDescriptors(UTIL.getConfiguration());
.setMemStoreFlushSize(64 * 1024 * 1024); TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
td = TableDescriptorBuilder.newBuilder(td).setMemStoreFlushSize(64 * 1024 * 1024).build();
HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
rootdir, this.conf, metaBuilder.build()); rootdir, this.conf, td);
try { try {
// Write rows for three tables 'A', 'B', and 'C'. // Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) { for (char c = 'A'; c < 'D'; c++) {
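The hunk above shows the pattern the patch establishes: instead of the removed hard-coded meta descriptor builder, tests read the meta schema from the filesystem via FSTableDescriptors and tweak it with TableDescriptorBuilder. A sketch of that flow on its own (names as in the diff):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public final class MetaDescriptorSketch {
  static TableDescriptor metaWithBigFlushSize(Configuration conf) throws Exception {
    TableDescriptors tds = new FSTableDescriptors(conf);
    // Read from the filesystem rather than from a hard-coded schema.
    TableDescriptor td = tds.get(TableName.META_TABLE_NAME);
    return TableDescriptorBuilder.newBuilder(td)
        .setMemStoreFlushSize(64 * 1024 * 1024).build();
  }
}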

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL;
@ -90,6 +91,9 @@ public class TestLogRollingNoCluster {
final Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); final Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.set(WALFactory.WAL_PROVIDER, "filesystem"); conf.set(WALFactory.WAL_PROVIDER, "filesystem");
FSUtils.setRootDir(conf, dir); FSUtils.setRootDir(conf, dir);
FSTableDescriptors fsTableDescriptors =
new FSTableDescriptors(TEST_UTIL.getConfiguration());
TableDescriptor metaTableDescriptor = fsTableDescriptors.get(TableName.META_TABLE_NAME);
conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName()); conf.set("hbase.regionserver.hlog.writer.impl", HighLatencySyncWriter.class.getName());
final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName()); final WALFactory wals = new WALFactory(conf, TestLogRollingNoCluster.class.getName());
final WAL wal = wals.getWAL(null); final WAL wal = wals.getWAL(null);
@ -101,7 +105,7 @@ public class TestLogRollingNoCluster {
try { try {
for (int i = 0; i < numThreads; i++) { for (int i = 0; i < numThreads; i++) {
// Have each appending thread write 'count' entries // Have each appending thread write 'count' entries
appenders[i] = new Appender(wal, i, NUM_ENTRIES); appenders[i] = new Appender(metaTableDescriptor, wal, i, NUM_ENTRIES);
} }
for (int i = 0; i < numThreads; i++) { for (int i = 0; i < numThreads; i++) {
appenders[i].start(); appenders[i].start();
@ -114,7 +118,7 @@ public class TestLogRollingNoCluster {
wals.close(); wals.close();
} }
for (int i = 0; i < numThreads; i++) { for (int i = 0; i < numThreads; i++) {
assertFalse(appenders[i].isException()); assertFalse("Error: " + appenders[i].getException(), appenders[i].isException());
} }
TEST_UTIL.shutdownMiniDFSCluster(); TEST_UTIL.shutdownMiniDFSCluster();
} }
@ -127,11 +131,13 @@ public class TestLogRollingNoCluster {
private final WAL wal; private final WAL wal;
private final int count; private final int count;
private Exception e = null; private Exception e = null;
private final TableDescriptor metaTableDescriptor;
Appender(final WAL wal, final int index, final int count) { Appender(TableDescriptor metaTableDescriptor, final WAL wal, final int index, final int count) {
super("" + index); super("" + index);
this.wal = wal; this.wal = wal;
this.count = count; this.count = count;
this.metaTableDescriptor = metaTableDescriptor;
this.log = LoggerFactory.getLogger("Appender:" + getName()); this.log = LoggerFactory.getLogger("Appender:" + getName());
} }
@ -161,9 +167,8 @@ public class TestLogRollingNoCluster {
byte[] bytes = Bytes.toBytes(i); byte[] bytes = Bytes.toBytes(i);
edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO; RegionInfo hri = RegionInfoBuilder.FIRST_META_REGIONINFO;
TableDescriptor htd = TEST_UTIL.getMetaTableDescriptorBuilder().build();
NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR); NavigableMap<byte[], Integer> scopes = new TreeMap<>(Bytes.BYTES_COMPARATOR);
for(byte[] fam : htd.getColumnFamilyNames()) { for(byte[] fam: this.metaTableDescriptor.getColumnFamilyNames()) {
scopes.put(fam, 0); scopes.put(fam, 0);
} }
final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(), final long txid = wal.appendData(hri, new WALKeyImpl(hri.getEncodedNameAsBytes(),

View File

@ -169,7 +169,7 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
htds.add(htd); htds.update(htd);
assertNotNull(htds.remove(htd.getTableName())); assertNotNull(htds.remove(htd.getTableName()));
assertNull(htds.remove(htd.getTableName())); assertNull(htds.remove(htd.getTableName()));
} }
@ -322,8 +322,13 @@ public class TestFSTableDescriptors {
} }
Map<String, TableDescriptor> tables = tds.getAll(); Map<String, TableDescriptor> tables = tds.getAll();
// Remove hbase:meta from the list. It shows up now that we have made it dynamic: the
// schema is written into the fs by the FSTableDescriptors constructor, where before
// it was not.
tables.remove(TableName.META_TABLE_NAME.getNameAsString());
assertEquals(4, tables.size()); assertEquals(4, tables.size());
String[] tableNamesOrdered = String[] tableNamesOrdered =
new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" }; new String[] { "bar:foo", "default:bar", "default:foo", "foo:bar" };
int i = 0; int i = 0;
@ -359,12 +364,13 @@ public class TestFSTableDescriptors {
assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
// add a new entry for hbase:meta // add a new entry for random table name.
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).build(); TableName random = TableName.valueOf("random");
TableDescriptor htd = TableDescriptorBuilder.newBuilder(random).build();
nonchtds.createTableDescriptor(htd); nonchtds.createTableDescriptor(htd);
// hbase:meta will only increase the cachehit by 1 // random will only increase the cachehit by 1
assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1);
for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) { for (Map.Entry<String, TableDescriptor> entry: nonchtds.getAll().entrySet()) {
String t = (String) entry.getKey(); String t = (String) entry.getKey();
@ -394,9 +400,9 @@ public class TestFSTableDescriptors {
Path rootdir = new Path(UTIL.getDataTestDir(), name); Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name)).build();
htds.add(htd); htds.update(htd);
htds.add(htd); htds.update(htd);
htds.add(htd); htds.update(htd);
} }
@Test @Test
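These hunks track the TableDescriptors rename from add() to update(). A short round-trip sketch of the renamed API, assuming the constructor and accessors shown elsewhere in this patch; "example" is a placeholder table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public final class UpdateDescriptorSketch {
  static void roundTrip(Configuration conf, FileSystem fs, Path rootDir) throws Exception {
    TableDescriptors tds = new FSTableDescriptors(conf, fs, rootDir);
    TableDescriptor td =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("example")).build();
    tds.update(td);                                           // formerly tds.add(td)
    TableDescriptor fetched = tds.get(td.getTableName());     // now present
    TableDescriptor removed = tds.remove(td.getTableName());  // returns the removed descriptor
  }
}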

View File

@ -44,6 +44,8 @@ EOF
puts puts
end end
formatter.footer formatter.footer
if table.to_s != 'hbase:meta'
# No QUOTAS for the hbase:meta table
puts puts
formatter.header(%w[QUOTAS]) formatter.header(%w[QUOTAS])
count = quotas_admin.list_quotas(TABLE => table.to_s) do |_, quota| count = quotas_admin.list_quotas(TABLE => table.to_s) do |_, quota|
@ -51,6 +53,7 @@ EOF
end end
formatter.footer(count) formatter.footer(count)
end end
end
# rubocop:enable Metrics/AbcSize, Metrics/MethodLength # rubocop:enable Metrics/AbcSize, Metrics/MethodLength
end end
end end

View File

@ -1,4 +1,4 @@
/** /*
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -61,14 +61,6 @@ public final class MetaTableLocator {
private MetaTableLocator() { private MetaTableLocator() {
} }
/**
* Checks if the meta region location is available.
* @return true if meta region location is available, false if not
*/
public static boolean isLocationAvailable(ZKWatcher zkw) {
return getMetaRegionLocation(zkw) != null;
}
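The deleted helper was a one-line wrapper, so callers can inline the equivalent check against the static getMetaRegionLocation shown in the removed body. A sketch (class name hypothetical):

import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public final class MetaLocationCheckSketch {
  // Equivalent of the removed MetaTableLocator.isLocationAvailable(zkw).
  static boolean isLocationAvailable(ZKWatcher zkw) {
    return MetaTableLocator.getMetaRegionLocation(zkw) != null;
  }
}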
/** /**
* @param zkw ZooKeeper watcher to be used * @param zkw ZooKeeper watcher to be used
* @return meta table regions and their locations. * @return meta table regions and their locations.
@ -266,7 +258,7 @@ public final class MetaTableLocator {
} }
/** /**
* Load the meta region state from the meta server ZNode. * Load the meta region state from the meta region server ZNode.
* *
* @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
* @param replicaId the ID of the replica * @param replicaId the ID of the replica
@ -306,10 +298,8 @@ public final class MetaTableLocator {
if (serverName == null) { if (serverName == null) {
state = RegionState.State.OFFLINE; state = RegionState.State.OFFLINE;
} }
return new RegionState( return new RegionState(RegionReplicaUtil.getRegionInfoForReplica(
RegionReplicaUtil.getRegionInfoForReplica( RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName);
RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId),
state, serverName);
} }
/** /**

View File

@ -2056,7 +2056,7 @@ public final class ZKUtil {
" byte(s) of data from znode " + znode + " byte(s) of data from znode " + znode +
(watcherSet? " and set watcher; ": "; data=") + (watcherSet? " and set watcher; ": "; data=") +
(data == null? "null": data.length == 0? "empty": ( (data == null? "null": data.length == 0? "empty": (
znode.startsWith(zkw.getZNodePaths().metaZNodePrefix)? zkw.getZNodePaths().isMetaZNodePrefix(znode)?
getServerNameOrEmptyString(data): getServerNameOrEmptyString(data):
znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)? znode.startsWith(zkw.getZNodePaths().backupMasterAddressesZNode)?
getServerNameOrEmptyString(data): getServerNameOrEmptyString(data):
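This last hunk follows the commit's "shut down access to internals" theme: ZKUtil no longer reaches into the metaZNodePrefix field and instead asks ZNodePaths whether a znode is a meta-location node. A hedged sketch of the predicate's intent only, not the shipped implementation; the actual prefix value and any replica or path handling live in ZNodePaths:

public final class ZNodePrefixSketch {
  // Assumed default; replica znodes append a suffix such as "-1", "-2", ...
  private final String metaZNodePrefix = "meta-region-server";

  // Encapsulates the startsWith check callers previously wrote by hand.
  boolean isMetaZNodePrefix(String znode) {
    return znode.startsWith(metaZNodePrefix);
  }
}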