diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
index 9d1570de9db..5b4f7c782d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
@@ -169,6 +169,11 @@ class ConnectionAdapter implements ClusterConnection {
     return wrappedConnection.isTableAvailable(tableName, splitKeys);
   }
 
+  @Override
+  public TableState getTableState(TableName tableName) throws IOException {
+    return wrappedConnection.getTableState(tableName);
+  }
+
   @Override
   public HTableDescriptor[] listTables() throws IOException {
     return wrappedConnection.listTables();
@@ -435,4 +440,4 @@ class ConnectionAdapter implements ClusterConnection {
   public AsyncProcess getAsyncProcess() {
     return wrappedConnection.getAsyncProcess();
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 7c9c0b93aaf..bbf180ee18c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -176,6 +176,8 @@ import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 
+import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
+
 /**
  * An internal, A non-instantiable class that manages creation of {@link HConnection}s.
  */
@@ -893,7 +895,7 @@
 
     @Override
     public boolean isTableEnabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, true);
+      return getTableState(tableName).inStates(TableState.State.ENABLED);
     }
 
     @Override
@@ -903,7 +905,7 @@
 
     @Override
     public boolean isTableDisabled(TableName tableName) throws IOException {
-      return this.registry.isTableOnlineState(tableName, false);
+      return getTableState(tableName).inStates(TableState.State.DISABLED);
     }
 
     @Override
@@ -1992,6 +1994,13 @@
       return stub.listTableNamesByNamespace(controller, request);
     }
 
+    @Override
+    public GetTableStateResponse getTableState(
+        RpcController controller, GetTableStateRequest request)
+        throws ServiceException {
+      return stub.getTableState(controller, request);
+    }
+
     @Override
     public void close() {
       release(this.mss);
@@ -2498,6 +2507,20 @@
       throws IOException {
       return getHTableDescriptor(TableName.valueOf(tableName));
     }
+
+    @Override
+    public TableState getTableState(TableName tableName) throws IOException {
+      MasterKeepAliveConnection master = getKeepAliveMasterService();
+      try {
+        GetTableStateResponse resp = master.getTableState(null,
+            RequestConverter.buildGetTableStateRequest(tableName));
+        return TableState.convert(resp.getTableState());
+      } catch (ServiceException se) {
+        throw ProtobufUtil.getRemoteException(se);
+      } finally {
+        master.close();
+      }
+    }
   }
 
 /**
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index cd11a520cf8..918c94452cf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -207,6 +207,13 @@ public interface HConnection extends Abortable, Closeable {
   @Deprecated
   boolean isTableDisabled(byte[] tableName) throws IOException;
 
+  /**
+   * Retrieve the TableState, representing the current state of the table.
+   * @param tableName table to fetch state for
+   * @return state of the table
+   */
+  public TableState getTableState(TableName tableName) throws IOException;
+
   /**
    * @param tableName table name
    * @return true if all regions of the table are available, false otherwise
@@ -576,4 +583,4 @@ public interface HConnection extends Abortable, Closeable {
    * @deprecated internal method, do not use thru HConnection */
   @Deprecated public NonceGenerator getNonceGenerator();
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
index aab547ecfe3..89c8cefa3d1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Registry.java
@@ -43,15 +43,9 @@ interface Registry {
    */
   String getClusterId();
 
-  /**
-   * @param enabled Return true if table is enabled
-   * @throws IOException
-   */
-  boolean isTableOnlineState(TableName tableName, boolean enabled) throws IOException;
-
   /**
    * @return Count of 'running' regionservers
    * @throws IOException
    */
   int getCurrentNrHRS() throws IOException;
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
new file mode 100644
index 00000000000..bb7a0289ac0
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+/**
+ * Represents table state.
+ */
+@InterfaceAudience.Private
+public class TableState {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static enum State {
+    ENABLED,
+    DISABLED,
+    DISABLING,
+    ENABLING;
+
+    /**
+     * Convert from PB version of State
+     *
+     * @param state convert from
+     * @return POJO
+     */
+    public static State convert(HBaseProtos.TableState.State state) {
+      State ret;
+      switch (state) {
+      case ENABLED:
+        ret = State.ENABLED;
+        break;
+      case DISABLED:
+        ret = State.DISABLED;
+        break;
+      case DISABLING:
+        ret = State.DISABLING;
+        break;
+      case ENABLING:
+        ret = State.ENABLING;
+        break;
+      default:
+        throw new IllegalStateException(state.toString());
+      }
+      return ret;
+    }
+
+    /**
+     * Convert to PB version of State
+     *
+     * @return PB
+     */
+    public HBaseProtos.TableState.State convert() {
+      HBaseProtos.TableState.State state;
+      switch (this) {
+      case ENABLED:
+        state = HBaseProtos.TableState.State.ENABLED;
+        break;
+      case DISABLED:
+        state = HBaseProtos.TableState.State.DISABLED;
+        break;
+      case DISABLING:
+        state = HBaseProtos.TableState.State.DISABLING;
+        break;
+      case ENABLING:
+        state = HBaseProtos.TableState.State.ENABLING;
+        break;
+      default:
+        throw new IllegalStateException(this.toString());
+      }
+      return state;
+    }
+
+  }
+
+  private final long timestamp;
+  private final TableName tableName;
+  private final State state;
+
+  /**
+   * Create instance of TableState.
+   * @param state table state
+   */
+  public TableState(TableName tableName, State state, long timestamp) {
+    this.tableName = tableName;
+    this.state = state;
+    this.timestamp = timestamp;
+  }
+
+  /**
+   * Create instance of TableState with current timestamp
+   *
+   * @param tableName table for which state is created
+   * @param state state of the table
+   */
+  public TableState(TableName tableName, State state) {
+    this(tableName, state, System.currentTimeMillis());
+  }
+
+  /**
+   * @return table state
+   */
+  public State getState() {
+    return state;
+  }
+
+  /**
+   * Timestamp of table state
+   *
+   * @return milliseconds
+   */
+  public long getTimestamp() {
+    return timestamp;
+  }
+
+  /**
+   * Table name for state
+   *
+   * @return table name
+   */
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  /**
+   * Check that table is in the given state
+   * @param state state to check
+   * @return true if the table is in the given state
+   */
+  public boolean inStates(State state) {
+    return this.state.equals(state);
+  }
+
+  /**
+   * Check that table is in one of the given states
+   * @param states state list
+   * @return true if the table is in one of the given states
+   */
+  public boolean inStates(State... states) {
+    for (State s : states) {
+      if (s.equals(this.state))
+        return true;
+    }
+    return false;
+  }
+
+
+  /**
+   * Convert to PB version of TableState
+   * @return PB
+   */
+  public HBaseProtos.TableState convert() {
+    return HBaseProtos.TableState.newBuilder()
+        .setState(this.state.convert())
+        .setTable(ProtobufUtil.toProtoTableName(this.tableName))
+        .setTimestamp(this.timestamp)
+        .build();
+  }
+
+  /**
+   * Convert from PB version of TableState
+   * @param tableState convert from
+   * @return POJO
+   */
+  public static TableState convert(HBaseProtos.TableState tableState) {
+    TableState.State state = State.convert(tableState.getState());
+    return new TableState(ProtobufUtil.toTableName(tableState.getTable()),
+        state, tableState.getTimestamp());
+  }
+
+  /**
+   * Static version of the state checker
+   * @param state state to check
+   * @param target states to check against
+   * @return true if the state equals any of the target states
+   */
+  public static boolean isInStates(State state, State... target) {
+    for (State tableState : target) {
+      if (state.equals(tableState))
+        return true;
+    }
+    return false;
+  }
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
index 9123d50ed0b..4d3cc3e5d2a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ZooKeeperRegistry.java
@@ -18,18 +18,17 @@ package org.apache.hadoop.hbase.client;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.zookeeper.KeeperException;
 
@@ -97,24 +96,6 @@ class ZooKeeperRegistry implements Registry {
     return this.clusterId;
   }
 
-  @Override
-  public boolean isTableOnlineState(TableName tableName, boolean enabled)
-  throws IOException {
-    ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
-    try {
-      if (enabled) {
-        return ZKTableStateClientSideReader.isEnabledTable(zkw, tableName);
-      }
-      return ZKTableStateClientSideReader.isDisabledTable(zkw, tableName);
-    } catch (KeeperException e) {
-      throw new IOException("Enable/Disable failed", e);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    } finally {
-      zkw.close();
-    }
-  }
-
   @Override
   public int getCurrentNrHRS() throws IOException {
     ZooKeeperKeepAliveConnection zkw = hci.getKeepAliveZooKeeperWatcher();
@@ -128,4 +109,4 @@ class ZooKeeperRegistry implements Registry {
       zkw.close();
     }
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index d6bcb29b366..ab764a38e47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.protobuf;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.classification.InterfaceAudience;
 
@@ -106,6 +107,8 @@ import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.protobuf.ByteString;
 
+import static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
+
 /**
  * Helper utility to build protocol buffer requests,
  * or build components for protocol buffer requests.
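Review note, not part of the patch: a minimal sketch of how the new client-side TableState POJO introduced above is meant to be used, assuming the hbase-client classes added by this patch are on the classpath. The class name TableStateExample and the table name "t1" are illustrative, not from the patch.

```java
// Hypothetical usage sketch for the new TableState POJO (not part of the patch).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class TableStateExample {
  public static void main(String[] args) {
    // Two-arg constructor stamps the state with the current time.
    TableState state = new TableState(TableName.valueOf("t1"), TableState.State.ENABLED);

    // Vararg check: matches if the table is in any of the listed states.
    boolean usable = state.inStates(TableState.State.ENABLED, TableState.State.ENABLING);

    // Round-trip through the protobuf form used on the wire.
    HBaseProtos.TableState pb = state.convert();
    TableState back = TableState.convert(pb);

    System.out.println(usable + " " + back.getTableName() + "=" + back.getState());
  }
}
```

The vararg inStates check is what lets ConnectionManager replace the old boolean enabled flag of the removed Registry.isTableOnlineState with explicit states.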
@@ -1176,6 +1179,19 @@ public final class RequestConverter {
     return builder.build();
   }
 
+  /**
+   * Creates a protocol buffer GetTableStateRequest
+   *
+   * @param tableName table to fetch state for
+   * @return a GetTableStateRequest
+   */
+  public static GetTableStateRequest buildGetTableStateRequest(
+      final TableName tableName) {
+    return GetTableStateRequest.newBuilder()
+        .setTableName(ProtobufUtil.toProtoTableName(tableName))
+        .build();
+  }
+
   /**
    * Creates a protocol buffer GetTableDescriptorsRequest for a single table
    *
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
deleted file mode 100644
index 94bd31ef85a..00000000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateClientSideReader.java
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-/**
- * Non-instantiable class that provides helper functions to learn
- * about HBase table state for code running on client side (hence, not having
- * access to consensus context).
- *
- * Doesn't cache any table state, just goes directly to ZooKeeper.
- * TODO: decouple this class from ZooKeeper.
- */
-@InterfaceAudience.Private
-public class ZKTableStateClientSideReader {
-
-  private ZKTableStateClientSideReader() {}
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-  throws KeeperException, InterruptedException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#ENABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isEnabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-  throws KeeperException, InterruptedException {
-    return getTableState(zkw, tableName) == ZooKeeperProtos.Table.State.ENABLED;
-  }
-
-  /**
-   * Go to zookeeper and see if state of table is {@code ZooKeeperProtos.Table.State#DISABLING}
-   * of {@code ZooKeeperProtos.Table.State#DISABLED}.
-   * This method does not use cache.
-   * This method is for clients other than AssignmentManager.
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return True if table is enabled.
-   * @throws KeeperException
-   */
-  public static boolean isDisablingOrDisabledTable(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-  throws KeeperException, InterruptedException {
-    ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-    return isTableState(ZooKeeperProtos.Table.State.DISABLING, state) ||
-      isTableState(ZooKeeperProtos.Table.State.DISABLED, state);
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledTables(ZooKeeperWatcher zkw)
-  throws KeeperException, InterruptedException {
-    Set<TableName> disabledTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    for (String child: children) {
-      TableName tableName =
-          TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-      if (state == ZooKeeperProtos.Table.State.DISABLED) disabledTables.add(tableName);
-    }
-    return disabledTables;
-  }
-
-  /**
-   * Gets a list of all the tables set as disabled in zookeeper.
-   * @return Set of disabled tables, empty Set if none
-   * @throws KeeperException
-   */
-  public static Set<TableName> getDisabledOrDisablingTables(ZooKeeperWatcher zkw)
-  throws KeeperException, InterruptedException {
-    Set<TableName> disabledTables = new HashSet<TableName>();
-    List<String> children =
-      ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
-    for (String child: children) {
-      TableName tableName =
-          TableName.valueOf(child);
-      ZooKeeperProtos.Table.State state = getTableState(zkw, tableName);
-      if (state == ZooKeeperProtos.Table.State.DISABLED ||
-          state == ZooKeeperProtos.Table.State.DISABLING)
-        disabledTables.add(tableName);
-    }
-    return disabledTables;
-  }
-
-  static boolean isTableState(final ZooKeeperProtos.Table.State expectedState,
-      final ZooKeeperProtos.Table.State currentState) {
-    return currentState != null && currentState.equals(expectedState);
-  }
-
-  /**
-   * @param zkw ZooKeeperWatcher instance to use
-   * @param tableName table we're checking
-   * @return Null or {@link ZooKeeperProtos.Table.State} found in znode.
-   * @throws KeeperException
-   */
-  static ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw,
-      final TableName tableName)
-  throws KeeperException, InterruptedException {
-    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
-    byte [] data = ZKUtil.getData(zkw, znode);
-    if (data == null || data.length <= 0) return null;
-    try {
-      ProtobufUtil.expectPBMagicPrefix(data);
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      int magicLen = ProtobufUtil.lengthOfPBMagic();
-      ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build();
-      return t.getState();
-    } catch (InvalidProtocolBufferException e) {
-      KeeperException ke = new KeeperException.DataInconsistencyException();
-      ke.initCause(e);
-      throw ke;
-    } catch (DeserializationException e) {
-      throw ZKUtil.convert(e);
-    }
-  }
-}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 8af95557644..2546f439ae6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -94,6 +94,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
   // znode containing the current cluster state
   public String clusterStateZNode;
   // znode used for table disabling/enabling
+  @Deprecated
   public String tableZNode;
   // znode containing the unique cluster ID
   public String clusterIdZNode;
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
index 58b4604466f..c06dd996c64 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java
@@ -129,12 +129,6 @@ public class TestClientNoCluster extends Configured implements Tool {
       return HConstants.CLUSTER_ID_DEFAULT;
     }
 
-    @Override
-    public boolean isTableOnlineState(TableName tableName, boolean enabled)
-    throws IOException {
-      return enabled;
-    }
-
     @Override
     public int getCurrentNrHRS() throws IOException {
       return 1;
@@ -814,4 +808,4 @@ public class TestClientNoCluster extends Configured implements Tool {
   public static void main(String[] args) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args));
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
index dd5559930a7..2f5322d9009 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
@@ -2404,6 +2404,1576 @@ public final class HBaseProtos {
     // @@protoc_insertion_point(class_scope:TableSchema)
   }
 
+  public interface TableStateOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .TableState.State state = 1;
+    /**
+     * <code>required .TableState.State state = 1;</code>
+     *
+     * <pre>
+     * This is the table's state.
+     * 
+ */ + boolean hasState(); + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + + // required .TableName table = 2; + /** + * required .TableName table = 2; + */ + boolean hasTable(); + /** + * required .TableName table = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable(); + /** + * required .TableName table = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder(); + + // optional uint64 timestamp = 3; + /** + * optional uint64 timestamp = 3; + */ + boolean hasTimestamp(); + /** + * optional uint64 timestamp = 3; + */ + long getTimestamp(); + } + /** + * Protobuf type {@code TableState} + * + *
+   ** Denotes state of the table 
+   * 
+ */ + public static final class TableState extends + com.google.protobuf.GeneratedMessage + implements TableStateOrBuilder { + // Use TableState.newBuilder() to construct. + private TableState(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableState defaultInstance; + public static TableState getDefaultInstance() { + return defaultInstance; + } + + public TableState getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + state_ = value; + } + break; + } + case 18: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + subBuilder = table_.toBuilder(); + } + table_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(table_); + table_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000002; + break; + } + case 24: { + bitField0_ |= 0x00000004; + timestamp_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableState(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code TableState.State} + * + *
+     * Table's current state
+     * 
+ */ + public enum State + implements com.google.protobuf.ProtocolMessageEnum { + /** + * ENABLED = 0; + */ + ENABLED(0, 0), + /** + * DISABLED = 1; + */ + DISABLED(1, 1), + /** + * DISABLING = 2; + */ + DISABLING(2, 2), + /** + * ENABLING = 3; + */ + ENABLING(3, 3), + ; + + /** + * ENABLED = 0; + */ + public static final int ENABLED_VALUE = 0; + /** + * DISABLED = 1; + */ + public static final int DISABLED_VALUE = 1; + /** + * DISABLING = 2; + */ + public static final int DISABLING_VALUE = 2; + /** + * ENABLING = 3; + */ + public static final int ENABLING_VALUE = 3; + + + public final int getNumber() { return value; } + + public static State valueOf(int value) { + switch (value) { + case 0: return ENABLED; + case 1: return DISABLED; + case 2: return DISABLING; + case 3: return ENABLING; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public State findValueByNumber(int number) { + return State.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDescriptor().getEnumTypes().get(0); + } + + private static final State[] VALUES = values(); + + public static State valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private State(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:TableState.State) + } + + private int bitField0_; + // required .TableState.State state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1; + * + *
+     * This is the table's state.
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + + // required .TableName table = 2; + public static final int TABLE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_; + /** + * required .TableName table = 2; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + return table_; + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + return table_; + } + + // optional uint64 timestamp = 3; + public static final int TIMESTAMP_FIELD_NUMBER = 3; + private long timestamp_; + /** + * optional uint64 timestamp = 3; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 timestamp = 3; + */ + public long getTimestamp() { + return timestamp_; + } + + private void initFields() { + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + timestamp_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTable()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTable().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, table_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, timestamp_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, state_.getNumber()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, table_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, timestamp_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) obj; + + boolean result = 
true; + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && (hasTable() == other.hasTable()); + if (hasTable()) { + result = result && getTable() + .equals(other.getTable()); + } + result = result && (hasTimestamp() == other.hasTimestamp()); + if (hasTimestamp()) { + result = result && (getTimestamp() + == other.getTimestamp()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (hasTable()) { + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + } + if (hasTimestamp()) { + hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTimestamp()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parseFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableState} + * + *
+     ** Denotes state of the table 
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000001); + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + timestamp_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableState_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.state_ = state_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + if (tableBuilder_ == null) { + result.table_ = table_; + } else { + result.table_ = tableBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.timestamp_ = timestamp_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) { + return 
mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) return this; + if (other.hasState()) { + setState(other.getState()); + } + if (other.hasTable()) { + mergeTable(other.getTable()); + } + if (other.hasTimestamp()) { + setTimestamp(other.getTimestamp()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasState()) { + + return false; + } + if (!hasTable()) { + + return false; + } + if (!getTable().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState.State state = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + state_ = value; + onChanged(); + return this; + } + /** + * required .TableState.State state = 1; + * + *
+       * This is the table's state.
+       * 
+ */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + onChanged(); + return this; + } + + // required .TableName table = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableBuilder_; + /** + * required .TableName table = 2; + */ + public boolean hasTable() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTable() { + if (tableBuilder_ == null) { + return table_; + } else { + return tableBuilder_.getMessage(); + } + } + /** + * required .TableName table = 2; + */ + public Builder setTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + table_ = value; + onChanged(); + } else { + tableBuilder_.setMessage(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder setTable( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableBuilder_ == null) { + table_ = builderForValue.build(); + onChanged(); + } else { + tableBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder mergeTable(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002) && + table_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + table_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(table_).mergeFrom(value).buildPartial(); + } else { + table_ = value; + } + onChanged(); + } else { + tableBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000002; + return this; + } + /** + * required .TableName table = 2; + */ + public Builder clearTable() { + if (tableBuilder_ == null) { + table_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableBuilder() { + bitField0_ |= 0x00000002; + onChanged(); + return getTableFieldBuilder().getBuilder(); + } + /** + * required .TableName table = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableOrBuilder() { + if (tableBuilder_ != null) { + return tableBuilder_.getMessageOrBuilder(); + } else { + return table_; + } + } + /** + * required .TableName table = 2; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + 
getTableFieldBuilder() { + if (tableBuilder_ == null) { + tableBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + table_, + getParentForChildren(), + isClean()); + table_ = null; + } + return tableBuilder_; + } + + // optional uint64 timestamp = 3; + private long timestamp_ ; + /** + * optional uint64 timestamp = 3; + */ + public boolean hasTimestamp() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 timestamp = 3; + */ + public long getTimestamp() { + return timestamp_; + } + /** + * optional uint64 timestamp = 3; + */ + public Builder setTimestamp(long value) { + bitField0_ |= 0x00000004; + timestamp_ = value; + onChanged(); + return this; + } + /** + * optional uint64 timestamp = 3; + */ + public Builder clearTimestamp() { + bitField0_ = (bitField0_ & ~0x00000004); + timestamp_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableState) + } + + static { + defaultInstance = new TableState(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableState) + } + + public interface TableDescriptorOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableSchema schema = 1; + /** + * required .TableSchema schema = 1; + */ + boolean hasSchema(); + /** + * required .TableSchema schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema(); + /** + * required .TableSchema schema = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder(); + + // optional .TableState.State state = 2 [default = ENABLED]; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + boolean hasState(); + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState(); + } + /** + * Protobuf type {@code TableDescriptor} + * + *
+   ** On HDFS representation of table state. 
+   * 
+ */ + public static final class TableDescriptor extends + com.google.protobuf.GeneratedMessage + implements TableDescriptorOrBuilder { + // Use TableDescriptor.newBuilder() to construct. + private TableDescriptor(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TableDescriptor(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableDescriptor defaultInstance; + public static TableDescriptor getDefaultInstance() { + return defaultInstance; + } + + public TableDescriptor getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TableDescriptor( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = schema_.toBuilder(); + } + schema_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(schema_); + schema_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + case 16: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(2, rawValue); + } else { + bitField0_ |= 0x00000002; + state_ = value; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableDescriptor parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableDescriptor(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableSchema schema = 1; + public static final int SCHEMA_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_; + /** + * required .TableSchema schema = 1; + */ + public boolean hasSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + return schema_; + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + return schema_; + } + + // optional .TableState.State state = 2 [default = ENABLED]; + public static final int STATE_FIELD_NUMBER = 2; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + + private void initFields() { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasSchema()) { + memoizedIsInitialized = 0; + return false; + } + if (!getSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, schema_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeEnum(2, state_.getNumber()); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, schema_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(2, state_.getNumber()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other = 
(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) obj; + + boolean result = true; + result = result && (hasSchema() == other.hasSchema()); + if (hasSchema()) { + result = result && getSchema() + .equals(other.getSchema()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasSchema()) { + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, 
extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code TableDescriptor} + * + *
+     * <pre>
+     ** On HDFS representation of table state.
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptorOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.class, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getSchemaFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + } else { + schemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.internal_static_TableDescriptor_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor build() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor result = new org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (schemaBuilder_ == null) { + result.schema_ = schema_; + } else { + result.schema_ = schemaBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.state_ = state_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor)other); + } else { + 
super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor.getDefaultInstance()) return this; + if (other.hasSchema()) { + mergeSchema(other.getSchema()); + } + if (other.hasState()) { + setState(other.getState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasSchema()) { + + return false; + } + if (!getSchema().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableDescriptor) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableSchema schema = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> schemaBuilder_; + /** + * required .TableSchema schema = 1; + */ + public boolean hasSchema() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema getSchema() { + if (schemaBuilder_ == null) { + return schema_; + } else { + return schemaBuilder_.getMessage(); + } + } + /** + * required .TableSchema schema = 1; + */ + public Builder setSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + schemaBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder setSchema( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder builderForValue) { + if (schemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + schemaBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder mergeSchema(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema value) { + if (schemaBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + schema_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance()) { + schema_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.newBuilder(schema_).mergeFrom(value).buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + schemaBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return 
this; + } + /** + * required .TableSchema schema = 1; + */ + public Builder clearSchema() { + if (schemaBuilder_ == null) { + schema_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.getDefaultInstance(); + onChanged(); + } else { + schemaBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder getSchemaBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getSchemaFieldBuilder().getBuilder(); + } + /** + * required .TableSchema schema = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder getSchemaOrBuilder() { + if (schemaBuilder_ != null) { + return schemaBuilder_.getMessageOrBuilder(); + } else { + return schema_; + } + } + /** + * required .TableSchema schema = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder> + getSchemaFieldBuilder() { + if (schemaBuilder_ == null) { + schemaBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchemaOrBuilder>( + schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + return schemaBuilder_; + } + + // optional .TableState.State state = 2 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public boolean hasState() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State getState() { + return state_; + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000002; + state_ = value; + onChanged(); + return this; + } + /** + * optional .TableState.State state = 2 [default = ENABLED]; + */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000002); + state_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.State.ENABLED; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:TableDescriptor) + } + + static { + defaultInstance = new TableDescriptor(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:TableDescriptor) + } + public interface ColumnFamilySchemaOrBuilder extends com.google.protobuf.MessageOrBuilder { @@ -16371,6 +17941,16 @@ public final class HBaseProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableSchema_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_TableState_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableState_fieldAccessorTable; + private 
static com.google.protobuf.Descriptors.Descriptor + internal_static_TableDescriptor_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_TableDescriptor_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_ColumnFamilySchema_descriptor; private static @@ -16486,47 +18066,53 @@ public final class HBaseProtos { "Name\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPai" + "r\022,\n\017column_families\030\003 \003(\0132\023.ColumnFamil" + "ySchema\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr" + - "ingPair\"o\n\022ColumnFamilySchema\022\014\n\004name\030\001 " + - "\002(\014\022#\n\nattributes\030\002 \003(\0132\017.BytesBytesPair" + - "\022&\n\rconfiguration\030\003 \003(\0132\017.NameStringPair" + - "\"\232\001\n\nRegionInfo\022\021\n\tregion_id\030\001 \002(\004\022\036\n\nta", - "ble_name\030\002 \002(\0132\n.TableName\022\021\n\tstart_key\030" + - "\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007offline\030\005 \001(\010\022" + - "\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id\030\007 \001(\005:\0010\"1\n" + - "\014FavoredNodes\022!\n\014favored_node\030\001 \003(\0132\013.Se" + - "rverName\"\225\001\n\017RegionSpecifier\0222\n\004type\030\001 \002" + - "(\0162$.RegionSpecifier.RegionSpecifierType" + - "\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpecifierType\022\017" + - "\n\013REGION_NAME\020\001\022\027\n\023ENCODED_REGION_NAME\020\002" + - "\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"" + - "A\n\nServerName\022\021\n\thost_name\030\001 \002(\t\022\014\n\004port", - "\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033\n\013Coprocesso" + - "r\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004nam" + - "e\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair\022" + - "\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesByt" + - "esPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014\",\n" + - "\rNameInt64Pair\022\014\n\004name\030\001 \001(\t\022\r\n\005value\030\002 " + - "\001(\003\"\314\001\n\023SnapshotDescription\022\014\n\004name\030\001 \002(" + - "\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation_time\030\003 \001(\003:" + - "\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotDescription.T" + - "ype:\005FLUSH\022\017\n\007version\030\005 \001(\005\022\r\n\005owner\030\006 \001", - "(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005FLUSH\020\001\022\r\n\tS" + - "KIPFLUSH\020\002\"}\n\024ProcedureDescription\022\021\n\tsi" + - "gnature\030\001 \002(\t\022\020\n\010instance\030\002 \001(\t\022\030\n\rcreat" + - "ion_time\030\003 \001(\003:\0010\022&\n\rconfiguration\030\004 \003(\013" + - "2\017.NameStringPair\"\n\n\010EmptyMsg\"\033\n\007LongMsg" + - "\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDoubleMsg\022\022\n\ndoubl" + - "e_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg\022\026\n\016bigdecim" + - "al_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001" + - " \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"K\n\023Namespace" + - "Descriptor\022\014\n\004name\030\001 \002(\014\022&\n\rconfiguratio", - "n\030\002 \003(\0132\017.NameStringPair\"$\n\020RegionServer" + - "Info\022\020\n\010infoPort\030\001 \001(\005*r\n\013CompareType\022\010\n" + - "\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n" + - 
"\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GR" + - "EATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.apache.hadoop." + - "hbase.protobuf.generatedB\013HBaseProtosH\001\240" + - "\001\001" + "ingPair\"\235\001\n\nTableState\022 \n\005state\030\001 \002(\0162\021." + + "TableState.State\022\031\n\005table\030\002 \002(\0132\n.TableN" + + "ame\022\021\n\ttimestamp\030\003 \001(\004\"?\n\005State\022\013\n\007ENABL" + + "ED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENA", + "BLING\020\003\"Z\n\017TableDescriptor\022\034\n\006schema\030\001 \002" + + "(\0132\014.TableSchema\022)\n\005state\030\002 \001(\0162\021.TableS" + + "tate.State:\007ENABLED\"o\n\022ColumnFamilySchem" + + "a\022\014\n\004name\030\001 \002(\014\022#\n\nattributes\030\002 \003(\0132\017.By" + + "tesBytesPair\022&\n\rconfiguration\030\003 \003(\0132\017.Na" + + "meStringPair\"\232\001\n\nRegionInfo\022\021\n\tregion_id" + + "\030\001 \002(\004\022\036\n\ntable_name\030\002 \002(\0132\n.TableName\022\021" + + "\n\tstart_key\030\003 \001(\014\022\017\n\007end_key\030\004 \001(\014\022\017\n\007of" + + "fline\030\005 \001(\010\022\r\n\005split\030\006 \001(\010\022\025\n\nreplica_id" + + "\030\007 \001(\005:\0010\"1\n\014FavoredNodes\022!\n\014favored_nod", + "e\030\001 \003(\0132\013.ServerName\"\225\001\n\017RegionSpecifier" + + "\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" + + "pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" + + "cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" + + "EGION_NAME\020\002\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022" + + "\n\n\002to\030\002 \001(\004\"A\n\nServerName\022\021\n\thost_name\030\001" + + " \002(\t\022\014\n\004port\030\002 \001(\r\022\022\n\nstart_code\030\003 \001(\004\"\033" + + "\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStrin" + + "gPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNa" + + "meBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014", + "\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006sec" + + "ond\030\002 \002(\014\",\n\rNameInt64Pair\022\014\n\004name\030\001 \001(\t" + + "\022\r\n\005value\030\002 \001(\003\"\314\001\n\023SnapshotDescription\022" + + "\014\n\004name\030\001 \002(\t\022\r\n\005table\030\002 \001(\t\022\030\n\rcreation" + + "_time\030\003 \001(\003:\0010\022.\n\004type\030\004 \001(\0162\031.SnapshotD" + + "escription.Type:\005FLUSH\022\017\n\007version\030\005 \001(\005\022" + + "\r\n\005owner\030\006 \001(\t\".\n\004Type\022\014\n\010DISABLED\020\000\022\t\n\005" + + "FLUSH\020\001\022\r\n\tSKIPFLUSH\020\002\"}\n\024ProcedureDescr" + + "iption\022\021\n\tsignature\030\001 \002(\t\022\020\n\010instance\030\002 " + + "\001(\t\022\030\n\rcreation_time\030\003 \001(\003:\0010\022&\n\rconfigu", + "ration\030\004 \003(\0132\017.NameStringPair\"\n\n\010EmptyMs" + + "g\"\033\n\007LongMsg\022\020\n\010long_msg\030\001 \002(\003\"\037\n\tDouble" + + "Msg\022\022\n\ndouble_msg\030\001 \002(\001\"\'\n\rBigDecimalMsg" + + "\022\026\n\016bigdecimal_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016leas" + + "t_sig_bits\030\001 \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"" + + "K\n\023NamespaceDescriptor\022\014\n\004name\030\001 \002(\014\022&\n\r" + + "configuration\030\002 \003(\0132\017.NameStringPair\"$\n\020" + + "RegionServerInfo\022\020\n\010infoPort\030\001 \001(\005*r\n\013Co" + + 
"mpareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t" + + "\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_E", + "QUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006B>\n*org.ap" + + "ache.hadoop.hbase.protobuf.generatedB\013HB" + + "aseProtosH\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -16545,122 +18131,134 @@ public final class HBaseProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TableSchema_descriptor, new java.lang.String[] { "TableName", "Attributes", "ColumnFamilies", "Configuration", }); - internal_static_ColumnFamilySchema_descriptor = + internal_static_TableState_descriptor = getDescriptor().getMessageTypes().get(2); + internal_static_TableState_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableState_descriptor, + new java.lang.String[] { "State", "Table", "Timestamp", }); + internal_static_TableDescriptor_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_TableDescriptor_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableDescriptor_descriptor, + new java.lang.String[] { "Schema", "State", }); + internal_static_ColumnFamilySchema_descriptor = + getDescriptor().getMessageTypes().get(4); internal_static_ColumnFamilySchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ColumnFamilySchema_descriptor, new java.lang.String[] { "Name", "Attributes", "Configuration", }); internal_static_RegionInfo_descriptor = - getDescriptor().getMessageTypes().get(3); + getDescriptor().getMessageTypes().get(5); internal_static_RegionInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionInfo_descriptor, new java.lang.String[] { "RegionId", "TableName", "StartKey", "EndKey", "Offline", "Split", "ReplicaId", }); internal_static_FavoredNodes_descriptor = - getDescriptor().getMessageTypes().get(4); + getDescriptor().getMessageTypes().get(6); internal_static_FavoredNodes_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_FavoredNodes_descriptor, new java.lang.String[] { "FavoredNode", }); internal_static_RegionSpecifier_descriptor = - getDescriptor().getMessageTypes().get(5); + getDescriptor().getMessageTypes().get(7); internal_static_RegionSpecifier_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionSpecifier_descriptor, new java.lang.String[] { "Type", "Value", }); internal_static_TimeRange_descriptor = - getDescriptor().getMessageTypes().get(6); + getDescriptor().getMessageTypes().get(8); internal_static_TimeRange_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_TimeRange_descriptor, new java.lang.String[] { "From", "To", }); internal_static_ServerName_descriptor = - getDescriptor().getMessageTypes().get(7); + getDescriptor().getMessageTypes().get(9); internal_static_ServerName_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ServerName_descriptor, new java.lang.String[] { "HostName", "Port", "StartCode", }); internal_static_Coprocessor_descriptor = - getDescriptor().getMessageTypes().get(8); + getDescriptor().getMessageTypes().get(10); 
internal_static_Coprocessor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_Coprocessor_descriptor, new java.lang.String[] { "Name", }); internal_static_NameStringPair_descriptor = - getDescriptor().getMessageTypes().get(9); + getDescriptor().getMessageTypes().get(11); internal_static_NameStringPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameStringPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_NameBytesPair_descriptor = - getDescriptor().getMessageTypes().get(10); + getDescriptor().getMessageTypes().get(12); internal_static_NameBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameBytesPair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_BytesBytesPair_descriptor = - getDescriptor().getMessageTypes().get(11); + getDescriptor().getMessageTypes().get(13); internal_static_BytesBytesPair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BytesBytesPair_descriptor, new java.lang.String[] { "First", "Second", }); internal_static_NameInt64Pair_descriptor = - getDescriptor().getMessageTypes().get(12); + getDescriptor().getMessageTypes().get(14); internal_static_NameInt64Pair_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NameInt64Pair_descriptor, new java.lang.String[] { "Name", "Value", }); internal_static_SnapshotDescription_descriptor = - getDescriptor().getMessageTypes().get(13); + getDescriptor().getMessageTypes().get(15); internal_static_SnapshotDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_SnapshotDescription_descriptor, new java.lang.String[] { "Name", "Table", "CreationTime", "Type", "Version", "Owner", }); internal_static_ProcedureDescription_descriptor = - getDescriptor().getMessageTypes().get(14); + getDescriptor().getMessageTypes().get(16); internal_static_ProcedureDescription_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ProcedureDescription_descriptor, new java.lang.String[] { "Signature", "Instance", "CreationTime", "Configuration", }); internal_static_EmptyMsg_descriptor = - getDescriptor().getMessageTypes().get(15); + getDescriptor().getMessageTypes().get(17); internal_static_EmptyMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_EmptyMsg_descriptor, new java.lang.String[] { }); internal_static_LongMsg_descriptor = - getDescriptor().getMessageTypes().get(16); + getDescriptor().getMessageTypes().get(18); internal_static_LongMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_LongMsg_descriptor, new java.lang.String[] { "LongMsg", }); internal_static_DoubleMsg_descriptor = - getDescriptor().getMessageTypes().get(17); + getDescriptor().getMessageTypes().get(19); internal_static_DoubleMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_DoubleMsg_descriptor, new java.lang.String[] { "DoubleMsg", }); internal_static_BigDecimalMsg_descriptor = - getDescriptor().getMessageTypes().get(18); + getDescriptor().getMessageTypes().get(20); internal_static_BigDecimalMsg_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_BigDecimalMsg_descriptor, new java.lang.String[] { 
"BigdecimalMsg", }); internal_static_UUID_descriptor = - getDescriptor().getMessageTypes().get(19); + getDescriptor().getMessageTypes().get(21); internal_static_UUID_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_UUID_descriptor, new java.lang.String[] { "LeastSigBits", "MostSigBits", }); internal_static_NamespaceDescriptor_descriptor = - getDescriptor().getMessageTypes().get(20); + getDescriptor().getMessageTypes().get(22); internal_static_NamespaceDescriptor_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_NamespaceDescriptor_descriptor, new java.lang.String[] { "Name", "Configuration", }); internal_static_RegionServerInfo_descriptor = - getDescriptor().getMessageTypes().get(21); + getDescriptor().getMessageTypes().get(23); internal_static_RegionServerInfo_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_RegionServerInfo_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java index ee1ab67cc7d..3189bd43b72 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java @@ -36655,6 +36655,1128 @@ public final class MasterProtos { // @@protoc_insertion_point(class_scope:GetTableNamesResponse) } + public interface GetTableStateRequestOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableName table_name = 1; + /** + * required .TableName table_name = 1; + */ + boolean hasTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName(); + /** + * required .TableName table_name = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class GetTableStateRequest extends + com.google.protobuf.GeneratedMessage + implements GetTableStateRequestOrBuilder { + // Use GetTableStateRequest.newBuilder() to construct. 
+ private GetTableStateRequest(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateRequest defaultInstance; + public static GetTableStateRequest getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateRequest getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableName_.toBuilder(); + } + tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableName_); + tableName_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateRequest(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableName table_name = 1; + public static final int TABLE_NAME_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_; + /** + * required 
.TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + return tableName_; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + return tableName_; + } + + private void initFields() { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableName()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableName().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableName_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableName_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) obj; + + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); + if (hasTableName()) { + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableName()) { + hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; + hash = (53 * hash) + getTableName().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.Builder.class); + } + + // Construct 
using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableNameFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateRequest_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableNameBuilder_ == null) { + result.tableName_ = tableName_; + } else { + result.tableName_ = tableNameBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance()) return this; + if (other.hasTableName()) { + mergeTableName(other.getTableName()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableName()) { + + return false; + } + if (!getTableName().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableName table_name = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_; + /** + * required .TableName table_name = 1; + */ + public boolean hasTableName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() { + if (tableNameBuilder_ == null) { + return tableName_; + } else { + return tableNameBuilder_.getMessage(); + } + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableName_ = value; + onChanged(); + } else { + tableNameBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder setTableName( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) { + if (tableNameBuilder_ == null) { + tableName_ = builderForValue.build(); + onChanged(); + } else { + tableNameBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) { + if (tableNameBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) { + tableName_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial(); + } else { + tableName_ = value; + } + onChanged(); + } else { + tableNameBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableName table_name = 1; + */ + public Builder clearTableName() { + if (tableNameBuilder_ == null) { + tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance(); + onChanged(); + } else { + tableNameBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableNameFieldBuilder().getBuilder(); + } + /** + * required .TableName table_name = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() { + if (tableNameBuilder_ != null) { + return tableNameBuilder_.getMessageOrBuilder(); + } else { + return tableName_; + } + } + /** + * required .TableName 
table_name = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> + getTableNameFieldBuilder() { + if (tableNameBuilder_ == null) { + tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>( + tableName_, + getParentForChildren(), + isClean()); + tableName_ = null; + } + return tableNameBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateRequest) + } + + static { + defaultInstance = new GetTableStateRequest(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateRequest) + } + + public interface GetTableStateResponseOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .TableState table_state = 1; + /** + * required .TableState table_state = 1; + */ + boolean hasTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState(); + /** + * required .TableState table_state = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder(); + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class GetTableStateResponse extends + com.google.protobuf.GeneratedMessage + implements GetTableStateResponseOrBuilder { + // Use GetTableStateResponse.newBuilder() to construct. + private GetTableStateResponse(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private GetTableStateResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTableStateResponse defaultInstance; + public static GetTableStateResponse getDefaultInstance() { + return defaultInstance; + } + + public GetTableStateResponse getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private GetTableStateResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder subBuilder = null; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + subBuilder = tableState_.toBuilder(); + } + tableState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableState_); + tableState_ = 
subBuilder.buildPartial(); + } + bitField0_ |= 0x00000001; + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTableStateResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTableStateResponse(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required .TableState table_state = 1; + public static final int TABLE_STATE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { + return tableState_; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { + return tableState_; + } + + private void initFields() { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasTableState()) { + memoizedIsInitialized = 0; + return false; + } + if (!getTableState().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, tableState_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, tableState_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; 
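+ // Standard generated-message boilerplate: serialVersionUID is pinned to 0L and
+ // writeReplace() delegates to GeneratedMessage, so Java serialization round-trips
+ // through the protobuf wire encoding instead of default field-by-field output.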
+ @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) obj; + + boolean result = true; + result = result && (hasTableState() == other.hasTableState()); + if (hasTableState()) { + result = result && getTableState() + .equals(other.getTableState()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasTableState()) { + hash = (37 * hash) + TABLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getTableState().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input) 
+ throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code GetTableStateResponse} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getTableStateFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_GetTableStateResponse_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse build() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + if (tableStateBuilder_ == null) { + result.tableState_ = tableState_; + } else { + result.tableState_ = tableStateBuilder_.build(); + } + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()) return this; + if (other.hasTableState()) { + mergeTableState(other.getTableState()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasTableState()) { + + return false; + } + if (!getTableState().isInitialized()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .TableState table_state = 1; + private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> tableStateBuilder_; + /** + * required .TableState table_state = 1; + */ + public boolean hasTableState() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState getTableState() { + if (tableStateBuilder_ == null) { + return tableState_; + } else { + return tableStateBuilder_.getMessage(); + } + } + /** + * required .TableState table_state = 1; + */ + public Builder setTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableState_ = value; + onChanged(); + } else { + tableStateBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required 
.TableState table_state = 1; + */ + public Builder setTableState( + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder builderForValue) { + if (tableStateBuilder_ == null) { + tableState_ = builderForValue.build(); + onChanged(); + } else { + tableStateBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder mergeTableState(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState value) { + if (tableStateBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001) && + tableState_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance()) { + tableState_ = + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.newBuilder(tableState_).mergeFrom(value).buildPartial(); + } else { + tableState_ = value; + } + onChanged(); + } else { + tableStateBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000001; + return this; + } + /** + * required .TableState table_state = 1; + */ + public Builder clearTableState() { + if (tableStateBuilder_ == null) { + tableState_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.getDefaultInstance(); + onChanged(); + } else { + tableStateBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder getTableStateBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return getTableStateFieldBuilder().getBuilder(); + } + /** + * required .TableState table_state = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder getTableStateOrBuilder() { + if (tableStateBuilder_ != null) { + return tableStateBuilder_.getMessageOrBuilder(); + } else { + return tableState_; + } + } + /** + * required .TableState table_state = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder> + getTableStateFieldBuilder() { + if (tableStateBuilder_ == null) { + tableStateBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableState.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableStateOrBuilder>( + tableState_, + getParentForChildren(), + isClean()); + tableState_ = null; + } + return tableStateBuilder_; + } + + // @@protoc_insertion_point(builder_scope:GetTableStateResponse) + } + + static { + defaultInstance = new GetTableStateResponse(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:GetTableStateResponse) + } + public interface GetClusterStatusRequestOrBuilder extends com.google.protobuf.MessageOrBuilder { } @@ -41176,6 +42298,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+       ** returns table state 
+       * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + } public static com.google.protobuf.Service newReflectiveService( @@ -41525,6 +42659,14 @@ public final class MasterProtos { impl.listTableNamesByNamespace(controller, request, done); } + @java.lang.Override + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + impl.getTableState(controller, request, done); + } + }; } @@ -41633,6 +42775,8 @@ public final class MasterProtos { return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request); case 42: return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request); + case 43: + return impl.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41733,6 +42877,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -41833,6 +42979,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42383,6 +43531,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request, com.google.protobuf.RpcCallback done); + /** + * rpc GetTableState(.GetTableStateRequest) returns (.GetTableStateResponse); + * + *
+     ** returns table state 
+     * </pre>
+ */ + public abstract void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done); + public static final com.google.protobuf.Descriptors.ServiceDescriptor getDescriptor() { @@ -42620,6 +43780,11 @@ public final class MasterProtos { com.google.protobuf.RpcUtil.specializeCallback( done)); return; + case 43: + this.getTableState(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest)request, + com.google.protobuf.RpcUtil.specializeCallback( + done)); + return; default: throw new java.lang.AssertionError("Can't get here."); } @@ -42720,6 +43885,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -42820,6 +43987,8 @@ public final class MasterProtos { return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance(); case 42: return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance(); + case 43: + return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(); default: throw new java.lang.AssertionError("Can't get here."); } @@ -43485,6 +44654,21 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance())); } + + public void getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request, + com.google.protobuf.RpcCallback done) { + channel.callMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance(), + com.google.protobuf.RpcUtil.generalizeCallback( + done, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.class, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance())); + } } public static BlockingInterface newBlockingStub( @@ -43707,6 +44891,11 @@ public final class MasterProtos { com.google.protobuf.RpcController controller, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request) throws com.google.protobuf.ServiceException; + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException; } private static final class BlockingStub implements BlockingInterface { @@ -44231,6 +45420,18 @@ public final class MasterProtos { org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()); } + + public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse getTableState( + 
com.google.protobuf.RpcController controller, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateRequest request) + throws com.google.protobuf.ServiceException { + return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse) channel.callBlockingMethod( + getDescriptor().getMethods().get(43), + controller, + request, + org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableStateResponse.getDefaultInstance()); + } + } // @@protoc_insertion_point(class_scope:MasterService) @@ -44606,6 +45807,16 @@ public final class MasterProtos { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetTableNamesResponse_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateRequest_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateRequest_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_GetTableStateResponse_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_GetTableStateResponse_fieldAccessorTable; private static com.google.protobuf.Descriptors.Descriptor internal_static_GetClusterStatusRequest_descriptor; private static @@ -44752,97 +45963,101 @@ public final class MasterProtos { "ponse\022\"\n\014table_schema\030\001 \003(\0132\014.TableSchem" + "a\"\026\n\024GetTableNamesRequest\"8\n\025GetTableNam" + "esResponse\022\037\n\013table_names\030\001 \003(\0132\n.TableN" + - "ame\"\031\n\027GetClusterStatusRequest\"B\n\030GetClu" + - "sterStatusResponse\022&\n\016cluster_status\030\001 \002" + - "(\0132\016.ClusterStatus\"\030\n\026IsMasterRunningReq", - "uest\"4\n\027IsMasterRunningResponse\022\031\n\021is_ma" + - "ster_running\030\001 \002(\010\"@\n\024ExecProcedureReque" + - "st\022(\n\tprocedure\030\001 \002(\0132\025.ProcedureDescrip" + - "tion\"F\n\025ExecProcedureResponse\022\030\n\020expecte" + - "d_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026" + - "IsProcedureDoneRequest\022(\n\tprocedure\030\001 \001(" + - "\0132\025.ProcedureDescription\"W\n\027IsProcedureD" + - "oneResponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snap" + - "shot\030\002 \001(\0132\025.ProcedureDescription2\365\027\n\rMa" + - "sterService\022S\n\024GetSchemaAlterStatus\022\034.Ge", - "tSchemaAlterStatusRequest\032\035.GetSchemaAlt" + - "erStatusResponse\022P\n\023GetTableDescriptors\022" + - "\033.GetTableDescriptorsRequest\032\034.GetTableD" + - "escriptorsResponse\022>\n\rGetTableNames\022\025.Ge" + - "tTableNamesRequest\032\026.GetTableNamesRespon" + - "se\022G\n\020GetClusterStatus\022\030.GetClusterStatu" + - "sRequest\032\031.GetClusterStatusResponse\022D\n\017I" + - "sMasterRunning\022\027.IsMasterRunningRequest\032" + - "\030.IsMasterRunningResponse\0222\n\tAddColumn\022\021" + - ".AddColumnRequest\032\022.AddColumnResponse\022;\n", - "\014DeleteColumn\022\024.DeleteColumnRequest\032\025.De" + - "leteColumnResponse\022;\n\014ModifyColumn\022\024.Mod" + - "ifyColumnRequest\032\025.ModifyColumnResponse\022" + - "5\n\nMoveRegion\022\022.MoveRegionRequest\032\023.Move" + - "RegionResponse\022Y\n\026DispatchMergingRegions" + - "\022\036.DispatchMergingRegionsRequest\032\037.Dispa" + - "tchMergingRegionsResponse\022;\n\014AssignRegio" + - "n\022\024.AssignRegionRequest\032\025.AssignRegionRe" + - "sponse\022A\n\016UnassignRegion\022\026.UnassignRegio" + - 
"nRequest\032\027.UnassignRegionResponse\022>\n\rOff", - "lineRegion\022\025.OfflineRegionRequest\032\026.Offl" + - "ineRegionResponse\0228\n\013DeleteTable\022\023.Delet" + - "eTableRequest\032\024.DeleteTableResponse\022>\n\rt" + - "runcateTable\022\025.TruncateTableRequest\032\026.Tr" + - "uncateTableResponse\0228\n\013EnableTable\022\023.Ena" + - "bleTableRequest\032\024.EnableTableResponse\022;\n" + - "\014DisableTable\022\024.DisableTableRequest\032\025.Di" + - "sableTableResponse\0228\n\013ModifyTable\022\023.Modi" + - "fyTableRequest\032\024.ModifyTableResponse\0228\n\013" + - "CreateTable\022\023.CreateTableRequest\032\024.Creat", - "eTableResponse\022/\n\010Shutdown\022\020.ShutdownReq" + - "uest\032\021.ShutdownResponse\0225\n\nStopMaster\022\022." + - "StopMasterRequest\032\023.StopMasterResponse\022," + - "\n\007Balance\022\017.BalanceRequest\032\020.BalanceResp" + - "onse\022M\n\022SetBalancerRunning\022\032.SetBalancer" + - "RunningRequest\032\033.SetBalancerRunningRespo" + - "nse\022A\n\016RunCatalogScan\022\026.RunCatalogScanRe" + - "quest\032\027.RunCatalogScanResponse\022S\n\024Enable" + - "CatalogJanitor\022\034.EnableCatalogJanitorReq" + - "uest\032\035.EnableCatalogJanitorResponse\022\\\n\027I", - "sCatalogJanitorEnabled\022\037.IsCatalogJanito" + - "rEnabledRequest\032 .IsCatalogJanitorEnable" + - "dResponse\022L\n\021ExecMasterService\022\032.Coproce" + - "ssorServiceRequest\032\033.CoprocessorServiceR" + - "esponse\022/\n\010Snapshot\022\020.SnapshotRequest\032\021." + - "SnapshotResponse\022V\n\025GetCompletedSnapshot" + - "s\022\035.GetCompletedSnapshotsRequest\032\036.GetCo" + - "mpletedSnapshotsResponse\022A\n\016DeleteSnapsh" + - "ot\022\026.DeleteSnapshotRequest\032\027.DeleteSnaps" + - "hotResponse\022A\n\016IsSnapshotDone\022\026.IsSnapsh", - "otDoneRequest\032\027.IsSnapshotDoneResponse\022D" + - "\n\017RestoreSnapshot\022\027.RestoreSnapshotReque" + - "st\032\030.RestoreSnapshotResponse\022V\n\025IsRestor" + - "eSnapshotDone\022\035.IsRestoreSnapshotDoneReq" + - "uest\032\036.IsRestoreSnapshotDoneResponse\022>\n\r" + - "ExecProcedure\022\025.ExecProcedureRequest\032\026.E" + - "xecProcedureResponse\022E\n\024ExecProcedureWit" + - "hRet\022\025.ExecProcedureRequest\032\026.ExecProced" + - "ureResponse\022D\n\017IsProcedureDone\022\027.IsProce" + - "dureDoneRequest\032\030.IsProcedureDoneRespons", - "e\022D\n\017ModifyNamespace\022\027.ModifyNamespaceRe" + - "quest\032\030.ModifyNamespaceResponse\022D\n\017Creat" + - "eNamespace\022\027.CreateNamespaceRequest\032\030.Cr" + - "eateNamespaceResponse\022D\n\017DeleteNamespace" + - "\022\027.DeleteNamespaceRequest\032\030.DeleteNamesp" + - "aceResponse\022Y\n\026GetNamespaceDescriptor\022\036." 
+ - "GetNamespaceDescriptorRequest\032\037.GetNames" + - "paceDescriptorResponse\022_\n\030ListNamespaceD" + - "escriptors\022 .ListNamespaceDescriptorsReq" + - "uest\032!.ListNamespaceDescriptorsResponse\022", - "t\n\037ListTableDescriptorsByNamespace\022\'.Lis" + - "tTableDescriptorsByNamespaceRequest\032(.Li" + - "stTableDescriptorsByNamespaceResponse\022b\n" + - "\031ListTableNamesByNamespace\022!.ListTableNa" + - "mesByNamespaceRequest\032\".ListTableNamesBy" + - "NamespaceResponseBB\n*org.apache.hadoop.h" + - "base.protobuf.generatedB\014MasterProtosH\001\210" + - "\001\001\240\001\001" + "ame\"6\n\024GetTableStateRequest\022\036\n\ntable_nam" + + "e\030\001 \002(\0132\n.TableName\"9\n\025GetTableStateResp" + + "onse\022 \n\013table_state\030\001 \002(\0132\013.TableState\"\031", + "\n\027GetClusterStatusRequest\"B\n\030GetClusterS" + + "tatusResponse\022&\n\016cluster_status\030\001 \002(\0132\016." + + "ClusterStatus\"\030\n\026IsMasterRunningRequest\"" + + "4\n\027IsMasterRunningResponse\022\031\n\021is_master_" + + "running\030\001 \002(\010\"@\n\024ExecProcedureRequest\022(\n" + + "\tprocedure\030\001 \002(\0132\025.ProcedureDescription\"" + + "F\n\025ExecProcedureResponse\022\030\n\020expected_tim" + + "eout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"B\n\026IsPro" + + "cedureDoneRequest\022(\n\tprocedure\030\001 \001(\0132\025.P" + + "rocedureDescription\"W\n\027IsProcedureDoneRe", + "sponse\022\023\n\004done\030\001 \001(\010:\005false\022\'\n\010snapshot\030" + + "\002 \001(\0132\025.ProcedureDescription2\265\030\n\rMasterS" + + "ervice\022S\n\024GetSchemaAlterStatus\022\034.GetSche" + + "maAlterStatusRequest\032\035.GetSchemaAlterSta" + + "tusResponse\022P\n\023GetTableDescriptors\022\033.Get" + + "TableDescriptorsRequest\032\034.GetTableDescri" + + "ptorsResponse\022>\n\rGetTableNames\022\025.GetTabl" + + "eNamesRequest\032\026.GetTableNamesResponse\022G\n" + + "\020GetClusterStatus\022\030.GetClusterStatusRequ" + + "est\032\031.GetClusterStatusResponse\022D\n\017IsMast", + "erRunning\022\027.IsMasterRunningRequest\032\030.IsM" + + "asterRunningResponse\0222\n\tAddColumn\022\021.AddC" + + "olumnRequest\032\022.AddColumnResponse\022;\n\014Dele" + + "teColumn\022\024.DeleteColumnRequest\032\025.DeleteC" + + "olumnResponse\022;\n\014ModifyColumn\022\024.ModifyCo" + + "lumnRequest\032\025.ModifyColumnResponse\0225\n\nMo" + + "veRegion\022\022.MoveRegionRequest\032\023.MoveRegio" + + "nResponse\022Y\n\026DispatchMergingRegions\022\036.Di" + + "spatchMergingRegionsRequest\032\037.DispatchMe" + + "rgingRegionsResponse\022;\n\014AssignRegion\022\024.A", + "ssignRegionRequest\032\025.AssignRegionRespons" + + "e\022A\n\016UnassignRegion\022\026.UnassignRegionRequ" + + "est\032\027.UnassignRegionResponse\022>\n\rOfflineR" + + "egion\022\025.OfflineRegionRequest\032\026.OfflineRe" + + "gionResponse\0228\n\013DeleteTable\022\023.DeleteTabl" + + "eRequest\032\024.DeleteTableResponse\022>\n\rtrunca" + + "teTable\022\025.TruncateTableRequest\032\026.Truncat" + + "eTableResponse\0228\n\013EnableTable\022\023.EnableTa" + + "bleRequest\032\024.EnableTableResponse\022;\n\014Disa" + + "bleTable\022\024.DisableTableRequest\032\025.Disable", + "TableResponse\0228\n\013ModifyTable\022\023.ModifyTab" + + "leRequest\032\024.ModifyTableResponse\0228\n\013Creat" + + "eTable\022\023.CreateTableRequest\032\024.CreateTabl" + + "eResponse\022/\n\010Shutdown\022\020.ShutdownRequest\032" + + "\021.ShutdownResponse\0225\n\nStopMaster\022\022.StopM" + + 
"asterRequest\032\023.StopMasterResponse\022,\n\007Bal" + + "ance\022\017.BalanceRequest\032\020.BalanceResponse\022" + + "M\n\022SetBalancerRunning\022\032.SetBalancerRunni" + + "ngRequest\032\033.SetBalancerRunningResponse\022A" + + "\n\016RunCatalogScan\022\026.RunCatalogScanRequest", + "\032\027.RunCatalogScanResponse\022S\n\024EnableCatal" + + "ogJanitor\022\034.EnableCatalogJanitorRequest\032" + + "\035.EnableCatalogJanitorResponse\022\\\n\027IsCata" + + "logJanitorEnabled\022\037.IsCatalogJanitorEnab" + + "ledRequest\032 .IsCatalogJanitorEnabledResp" + + "onse\022L\n\021ExecMasterService\022\032.CoprocessorS" + + "erviceRequest\032\033.CoprocessorServiceRespon" + + "se\022/\n\010Snapshot\022\020.SnapshotRequest\032\021.Snaps" + + "hotResponse\022V\n\025GetCompletedSnapshots\022\035.G" + + "etCompletedSnapshotsRequest\032\036.GetComplet", + "edSnapshotsResponse\022A\n\016DeleteSnapshot\022\026." + + "DeleteSnapshotRequest\032\027.DeleteSnapshotRe" + + "sponse\022A\n\016IsSnapshotDone\022\026.IsSnapshotDon" + + "eRequest\032\027.IsSnapshotDoneResponse\022D\n\017Res" + + "toreSnapshot\022\027.RestoreSnapshotRequest\032\030." + + "RestoreSnapshotResponse\022V\n\025IsRestoreSnap" + + "shotDone\022\035.IsRestoreSnapshotDoneRequest\032" + + "\036.IsRestoreSnapshotDoneResponse\022>\n\rExecP" + + "rocedure\022\025.ExecProcedureRequest\032\026.ExecPr" + + "ocedureResponse\022E\n\024ExecProcedureWithRet\022", + "\025.ExecProcedureRequest\032\026.ExecProcedureRe" + + "sponse\022D\n\017IsProcedureDone\022\027.IsProcedureD" + + "oneRequest\032\030.IsProcedureDoneResponse\022D\n\017" + + "ModifyNamespace\022\027.ModifyNamespaceRequest" + + "\032\030.ModifyNamespaceResponse\022D\n\017CreateName" + + "space\022\027.CreateNamespaceRequest\032\030.CreateN" + + "amespaceResponse\022D\n\017DeleteNamespace\022\027.De" + + "leteNamespaceRequest\032\030.DeleteNamespaceRe" + + "sponse\022Y\n\026GetNamespaceDescriptor\022\036.GetNa" + + "mespaceDescriptorRequest\032\037.GetNamespaceD", + "escriptorResponse\022_\n\030ListNamespaceDescri" + + "ptors\022 .ListNamespaceDescriptorsRequest\032" + + "!.ListNamespaceDescriptorsResponse\022t\n\037Li" + + "stTableDescriptorsByNamespace\022\'.ListTabl" + + "eDescriptorsByNamespaceRequest\032(.ListTab" + + "leDescriptorsByNamespaceResponse\022b\n\031List" + + "TableNamesByNamespace\022!.ListTableNamesBy" + + "NamespaceRequest\032\".ListTableNamesByNames" + + "paceResponse\022>\n\rGetTableState\022\025.GetTable" + + "StateRequest\032\026.GetTableStateResponseBB\n*", + "org.apache.hadoop.hbase.protobuf.generat" + + "edB\014MasterProtosH\001\210\001\001\240\001\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -45293,50 +46508,62 @@ public final class MasterProtos { com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetTableNamesResponse_descriptor, new java.lang.String[] { "TableNames", }); - internal_static_GetClusterStatusRequest_descriptor = + internal_static_GetTableStateRequest_descriptor = getDescriptor().getMessageTypes().get(74); + internal_static_GetTableStateRequest_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateRequest_descriptor, + new java.lang.String[] { "TableName", }); + internal_static_GetTableStateResponse_descriptor = + getDescriptor().getMessageTypes().get(75); + internal_static_GetTableStateResponse_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTableStateResponse_descriptor, + new java.lang.String[] { "TableState", }); + internal_static_GetClusterStatusRequest_descriptor = + getDescriptor().getMessageTypes().get(76); internal_static_GetClusterStatusRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusRequest_descriptor, new java.lang.String[] { }); internal_static_GetClusterStatusResponse_descriptor = - getDescriptor().getMessageTypes().get(75); + getDescriptor().getMessageTypes().get(77); internal_static_GetClusterStatusResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_GetClusterStatusResponse_descriptor, new java.lang.String[] { "ClusterStatus", }); internal_static_IsMasterRunningRequest_descriptor = - getDescriptor().getMessageTypes().get(76); + getDescriptor().getMessageTypes().get(78); internal_static_IsMasterRunningRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningRequest_descriptor, new java.lang.String[] { }); internal_static_IsMasterRunningResponse_descriptor = - getDescriptor().getMessageTypes().get(77); + getDescriptor().getMessageTypes().get(79); internal_static_IsMasterRunningResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsMasterRunningResponse_descriptor, new java.lang.String[] { "IsMasterRunning", }); internal_static_ExecProcedureRequest_descriptor = - getDescriptor().getMessageTypes().get(78); + getDescriptor().getMessageTypes().get(80); internal_static_ExecProcedureRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_ExecProcedureResponse_descriptor = - getDescriptor().getMessageTypes().get(79); + getDescriptor().getMessageTypes().get(81); internal_static_ExecProcedureResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_ExecProcedureResponse_descriptor, new java.lang.String[] { "ExpectedTimeout", "ReturnData", }); internal_static_IsProcedureDoneRequest_descriptor = - getDescriptor().getMessageTypes().get(80); + getDescriptor().getMessageTypes().get(82); internal_static_IsProcedureDoneRequest_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneRequest_descriptor, new java.lang.String[] { "Procedure", }); internal_static_IsProcedureDoneResponse_descriptor = - getDescriptor().getMessageTypes().get(81); + getDescriptor().getMessageTypes().get(83); internal_static_IsProcedureDoneResponse_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_IsProcedureDoneResponse_descriptor, diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java index 6da497e1b15..a86c4a605db 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java @@ -3242,12 +3242,12 @@ public final class ZooKeeperProtos { // @@protoc_insertion_point(class_scope:SplitLogTask) } - public interface TableOrBuilder + public interface 
DeprecatedTableStateOrBuilder extends com.google.protobuf.MessageOrBuilder { - // required .Table.State state = 1 [default = ENABLED]; + // required .DeprecatedTableState.State state = 1 [default = ENABLED]; /** - * required .Table.State state = 1 [default = ENABLED]; + * required .DeprecatedTableState.State state = 1 [default = ENABLED]; * *
      * This is the table's state.  If no znode for a table,
@@ -3257,7 +3257,7 @@ public final class ZooKeeperProtos {
      */
     boolean hasState();
     /**
-     * required .Table.State state = 1 [default = ENABLED];
+     * required .DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -3265,32 +3265,33 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
*/ - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState(); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState(); } /** - * Protobuf type {@code Table} + * Protobuf type {@code DeprecatedTableState} * *
    **
    * The znode that holds state of table.
+   * Deprecated, table state is stored in table descriptor on HDFS.
    * </pre>
*/ - public static final class Table extends + public static final class DeprecatedTableState extends com.google.protobuf.GeneratedMessage - implements TableOrBuilder { - // Use Table.newBuilder() to construct. - private Table(com.google.protobuf.GeneratedMessage.Builder builder) { + implements DeprecatedTableStateOrBuilder { + // Use DeprecatedTableState.newBuilder() to construct. + private DeprecatedTableState(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); this.unknownFields = builder.getUnknownFields(); } - private Table(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + private DeprecatedTableState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - private static final Table defaultInstance; - public static Table getDefaultInstance() { + private static final DeprecatedTableState defaultInstance; + public static DeprecatedTableState getDefaultInstance() { return defaultInstance; } - public Table getDefaultInstanceForType() { + public DeprecatedTableState getDefaultInstanceForType() { return defaultInstance; } @@ -3300,7 +3301,7 @@ public final class ZooKeeperProtos { getUnknownFields() { return this.unknownFields; } - private Table( + private DeprecatedTableState( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3325,7 +3326,7 @@ public final class ZooKeeperProtos { } case 8: { int rawValue = input.readEnum(); - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.valueOf(rawValue); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { @@ -3348,33 +3349,33 @@ public final class ZooKeeperProtos { } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_DeprecatedTableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_DeprecatedTableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); } - public static com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser
() { - public Table parsePartialFrom( + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public DeprecatedTableState parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - return new Table(input, extensionRegistry); + return new DeprecatedTableState(input, extensionRegistry); } }; @java.lang.Override - public com.google.protobuf.Parser
getParserForType() { + public com.google.protobuf.Parser getParserForType() { return PARSER; } /** - * Protobuf enum {@code Table.State} + * Protobuf enum {@code DeprecatedTableState.State} * *
      * Table's current state
@@ -3452,7 +3453,7 @@ public final class ZooKeeperProtos {
       }
       public static final com.google.protobuf.Descriptors.EnumDescriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDescriptor().getEnumTypes().get(0);
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDescriptor().getEnumTypes().get(0);
       }
 
       private static final State[] VALUES = values();
@@ -3474,15 +3475,15 @@ public final class ZooKeeperProtos {
         this.value = value;
       }
 
-      // @@protoc_insertion_point(enum_scope:Table.State)
+      // @@protoc_insertion_point(enum_scope:DeprecatedTableState.State)
     }
 
     private int bitField0_;
-    // required .Table.State state = 1 [default = ENABLED];
+    // required .DeprecatedTableState.State state = 1 [default = ENABLED];
     public static final int STATE_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_;
     /**
-     * required .Table.State state = 1 [default = ENABLED];
+     * required .DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -3494,7 +3495,7 @@ public final class ZooKeeperProtos {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * required .Table.State state = 1 [default = ENABLED];
+     * required .DeprecatedTableState.State state = 1 [default = ENABLED];
      *
      * <pre>
      * This is the table's state.  If no znode for a table,
@@ -3502,12 +3503,12 @@ public final class ZooKeeperProtos {
      * for more.
      * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { return state_; } private void initFields() { - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { @@ -3558,10 +3559,10 @@ public final class ZooKeeperProtos { if (obj == this) { return true; } - if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)) { + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)) { return super.equals(obj); } - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) obj; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) obj; boolean result = true; result = result && (hasState() == other.hasState()); @@ -3591,53 +3592,53 @@ public final class ZooKeeperProtos { return hash; } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(byte[] data) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table 
parseDelimitedFrom(java.io.InputStream input) + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { return PARSER.parseDelimitedFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseDelimitedFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return PARSER.parseDelimitedFrom(input, extensionRegistry); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return PARSER.parseFrom(input); } - public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parseFrom( + public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { @@ -3646,7 +3647,7 @@ public final class ZooKeeperProtos { public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table prototype) { + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } @@ -3658,29 +3659,30 @@ public final class ZooKeeperProtos { return builder; } /** - * Protobuf type {@code Table} + * Protobuf type {@code DeprecatedTableState} * *
      **
      * The znode that holds state of table.
+     * Deprecated, table state is stored in table descriptor on HDFS.
      * </pre>
*/ public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder - implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableOrBuilder { + implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableStateOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_DeprecatedTableState_descriptor; } protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_fieldAccessorTable + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_DeprecatedTableState_fieldAccessorTable .ensureFieldAccessorsInitialized( - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.Builder.class); + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.Builder.class); } - // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.newBuilder() + // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.newBuilder() private Builder() { maybeForceBuilderInitialization(); } @@ -3700,7 +3702,7 @@ public final class ZooKeeperProtos { public Builder clear() { super.clear(); - state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; bitField0_ = (bitField0_ & ~0x00000001); return this; } @@ -3711,23 +3713,23 @@ public final class ZooKeeperProtos { public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_Table_descriptor; + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_DeprecatedTableState_descriptor; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table getDefaultInstanceForType() { - return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance(); } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table build() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = buildPartial(); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState build() { + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table buildPartial() { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table(this); + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState buildPartial() { + 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (((from_bitField0_ & 0x00000001) == 0x00000001)) { @@ -3740,16 +3742,16 @@ public final class ZooKeeperProtos { } public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) { - return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table)other); + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState)other); } else { super.mergeFrom(other); return this; } } - public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table other) { - if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.getDefaultInstance()) return this; + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.getDefaultInstance()) return this; if (other.hasState()) { setState(other.getState()); } @@ -3769,11 +3771,11 @@ public final class ZooKeeperProtos { com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table parsedMessage = null; + org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table) e.getUnfinishedMessage(); + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState) e.getUnfinishedMessage(); throw e; } finally { if (parsedMessage != null) { @@ -3784,10 +3786,10 @@ public final class ZooKeeperProtos { } private int bitField0_; - // required .Table.State state = 1 [default = ENABLED]; - private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED; + // required .DeprecatedTableState.State state = 1 [default = ENABLED]; + private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED; /** - * required .Table.State state = 1 [default = ENABLED]; + * required .DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -3799,7 +3801,7 @@ public final class ZooKeeperProtos {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * required .Table.State state = 1 [default = ENABLED];
+       * required .DeprecatedTableState.State state = 1 [default = ENABLED];
        *
        * <pre>
        * This is the table's state.  If no znode for a table,
@@ -3807,11 +3809,11 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
*/ - public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State getState() { + public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State getState() { return state_; } /** - * required .Table.State state = 1 [default = ENABLED]; + * required .DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -3819,7 +3821,7 @@ public final class ZooKeeperProtos {
        * for more.
        * </pre>
*/ - public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State value) { + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State value) { if (value == null) { throw new NullPointerException(); } @@ -3829,7 +3831,7 @@ public final class ZooKeeperProtos { return this; } /** - * required .Table.State state = 1 [default = ENABLED]; + * required .DeprecatedTableState.State state = 1 [default = ENABLED]; * *
        * This is the table's state.  If no znode for a table,
@@ -3839,20 +3841,20 @@ public final class ZooKeeperProtos {
        */
       public Builder clearState() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table.State.ENABLED;
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State.ENABLED;
         onChanged();
         return this;
       }
 
-      // @@protoc_insertion_point(builder_scope:Table)
+      // @@protoc_insertion_point(builder_scope:DeprecatedTableState)
     }
 
     static {
-      defaultInstance = new Table(true);
+      defaultInstance = new DeprecatedTableState(true);
       defaultInstance.initFields();
     }
 
-    // @@protoc_insertion_point(class_scope:Table)
+    // @@protoc_insertion_point(class_scope:DeprecatedTableState)
   }
 
   public interface ReplicationPeerOrBuilder
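
The Table -> DeprecatedTableState rename above is source-level only: the protobuf wire format records field numbers, not message names, and field 1 plus the State enum values are unchanged, so state payloads written by the old Table message still parse with the renamed message. A minimal, hypothetical Java sketch of that compatibility, assuming znodeBytes holds just the serialized message (any HBase "PBUF" magic framing already stripped); LegacyTableStateReader is an illustrative name, not part of this patch:

    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.DeprecatedTableState;

    public final class LegacyTableStateReader {
      // Bytes originally written by the old ZooKeeperProtos.Table message
      // deserialize here because the wire format carries only field tags.
      static DeprecatedTableState.State readState(byte[] znodeBytes)
          throws com.google.protobuf.InvalidProtocolBufferException {
        // state is a required field, so parseFrom rejects payloads omitting it.
        return DeprecatedTableState.parseFrom(znodeBytes).getState();
      }
    }
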
@@ -9512,10 +9514,10 @@ public final class ZooKeeperProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_SplitLogTask_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_Table_descriptor;
+    internal_static_DeprecatedTableState_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_Table_fieldAccessorTable;
+      internal_static_DeprecatedTableState_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_ReplicationPeer_descriptor;
   private static
@@ -9573,27 +9575,28 @@ public final class ZooKeeperProtos {
       "UNASSIGNED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n" +
       "\004DONE\020\003\022\007\n\003ERR\020\004\">\n\014RecoveryMode\022\013\n\007UNKN" +
       "OWN\020\000\022\021\n\rLOG_SPLITTING\020\001\022\016\n\nLOG_REPLAY\020\002" +
-      "\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007" +
-      "ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLE" +
-      "D\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017Rep" +
-      "licationPeer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027repl" +
-      "icationEndpointImpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132" +
-      "\017.BytesBytesPair\022&\n\rconfiguration\030\004 \003(\0132" +
-      "\017.NameStringPair\"^\n\020ReplicationState\022&\n\005",
-      "state\030\001 \002(\0162\027.ReplicationState.State\"\"\n\005" +
-      "State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027Repl" +
-      "icationHLogPosition\022\020\n\010position\030\001 \002(\003\"%\n" +
-      "\017ReplicationLock\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n" +
-      "\tTableLock\022\036\n\ntable_name\030\001 \001(\0132\n.TableNa" +
-      "me\022\037\n\nlock_owner\030\002 \001(\0132\013.ServerName\022\021\n\tt" +
-      "hread_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007pur" +
-      "pose\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\";\n\017Store" +
-      "SequenceId\022\023\n\013family_name\030\001 \002(\014\022\023\n\013seque" +
-      "nce_id\030\002 \002(\004\"g\n\026RegionStoreSequenceIds\022 ",
-      "\n\030last_flushed_sequence_id\030\001 \002(\004\022+\n\021stor" +
-      "e_sequence_id\030\002 \003(\0132\020.StoreSequenceIdBE\n" +
-      "*org.apache.hadoop.hbase.protobuf.genera" +
-      "tedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "\"\214\001\n\024DeprecatedTableState\0223\n\005state\030\001 \002(\016" +
+      "2\033.DeprecatedTableState.State:\007ENABLED\"?" +
+      "\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED\020\001\022\r\n\tDI" +
+      "SABLING\020\002\022\014\n\010ENABLING\020\003\"\215\001\n\017ReplicationP" +
+      "eer\022\022\n\nclusterkey\030\001 \002(\t\022\037\n\027replicationEn" +
+      "dpointImpl\030\002 \001(\t\022\035\n\004data\030\003 \003(\0132\017.BytesBy" +
+      "tesPair\022&\n\rconfiguration\030\004 \003(\0132\017.NameStr",
+      "ingPair\"^\n\020ReplicationState\022&\n\005state\030\001 \002" +
+      "(\0162\027.ReplicationState.State\"\"\n\005State\022\013\n\007" +
+      "ENABLED\020\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHL" +
+      "ogPosition\022\020\n\010position\030\001 \002(\003\"%\n\017Replicat" +
+      "ionLock\022\022\n\nlock_owner\030\001 \002(\t\"\230\001\n\tTableLoc" +
+      "k\022\036\n\ntable_name\030\001 \001(\0132\n.TableName\022\037\n\nloc" +
+      "k_owner\030\002 \001(\0132\013.ServerName\022\021\n\tthread_id\030" +
+      "\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(" +
+      "\t\022\023\n\013create_time\030\006 \001(\003\";\n\017StoreSequenceI" +
+      "d\022\023\n\013family_name\030\001 \002(\014\022\023\n\013sequence_id\030\002 ",
+      "\002(\004\"g\n\026RegionStoreSequenceIds\022 \n\030last_fl" +
+      "ushed_sequence_id\030\001 \002(\004\022+\n\021store_sequenc" +
+      "e_id\030\002 \003(\0132\020.StoreSequenceIdBE\n*org.apac" +
+      "he.hadoop.hbase.protobuf.generatedB\017ZooK" +
+      "eeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -9624,11 +9627,11 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_SplitLogTask_descriptor,
               new java.lang.String[] { "State", "ServerName", "Mode", });
-          internal_static_Table_descriptor =
+          internal_static_DeprecatedTableState_descriptor =
             getDescriptor().getMessageTypes().get(4);
-          internal_static_Table_fieldAccessorTable = new
+          internal_static_DeprecatedTableState_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_Table_descriptor,
+              internal_static_DeprecatedTableState_descriptor,
               new java.lang.String[] { "State", });
           internal_static_ReplicationPeer_descriptor =
             getDescriptor().getMessageTypes().get(5);
diff --git a/hbase-protocol/src/main/protobuf/HBase.proto b/hbase-protocol/src/main/protobuf/HBase.proto
index ca09777b0c6..252f5321ad4 100644
--- a/hbase-protocol/src/main/protobuf/HBase.proto
+++ b/hbase-protocol/src/main/protobuf/HBase.proto
@@ -44,6 +44,27 @@ message TableSchema {
   repeated NameStringPair configuration = 4;
 }
 
+/** Denotes state of the table */
+message TableState {
+  // Table's current state
+  enum State {
+    ENABLED = 0;
+    DISABLED = 1;
+    DISABLING = 2;
+    ENABLING = 3;
+  }
+  // This is the table's state.
+  required State state = 1;
+  required TableName table = 2;
+  optional uint64 timestamp = 3;
+}
+
+/** On-HDFS representation of table state (the schema plus the table's current state). */
+message TableDescriptor {
+  required TableSchema schema = 1;
+  optional TableState.State state = 2 [ default = ENABLED ];
+}
+
 /**
  * Column Family Schema
  * Inspired by the rest ColumSchemaMessage
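
For illustration, a minimal sketch (not part of the patch) of building the new messages with the protoc-generated Java API; the HBaseProtos outer class and ByteString usage follow the generated-code conventions seen elsewhere in this patch, and schemaProto is an assumed, already-built TableSchema message:

    // Hypothetical construction; builder methods follow directly from the fields above.
    HBaseProtos.TableName tn = HBaseProtos.TableName.newBuilder()
        .setNamespace(ByteString.copyFromUtf8("default"))
        .setQualifier(ByteString.copyFromUtf8("t1"))
        .build();
    HBaseProtos.TableState tableState = HBaseProtos.TableState.newBuilder()
        .setState(HBaseProtos.TableState.State.ENABLED)
        .setTable(tn)
        .setTimestamp(System.currentTimeMillis())   // optional field
        .build();
    HBaseProtos.TableDescriptor tableDescriptor = HBaseProtos.TableDescriptor.newBuilder()
        .setSchema(schemaProto)                     // assumed TableSchema, built elsewhere
        .setState(HBaseProtos.TableState.State.DISABLED)
        .build();
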
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 94ea8603126..85daf438227 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -326,6 +326,14 @@ message GetTableNamesResponse {
   repeated TableName table_names = 1;
 }
 
+message GetTableStateRequest {
+  required TableName table_name = 1;
+}
+
+message GetTableStateResponse {
+  required TableState table_state = 1;
+}
+
 message GetClusterStatusRequest {
 }
 
@@ -565,4 +573,8 @@ service MasterService {
   /** returns a list of tables for a given namespace*/
   rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
     returns(ListTableNamesByNamespaceResponse);
+
+  /** returns table state */
+  rpc GetTableState(GetTableStateRequest)
+    returns(GetTableStateResponse);
 }
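
A hedged sketch of invoking the new endpoint through the generated blocking stub; stub is assumed to be a MasterService.BlockingInterface obtained from an RPC channel, and the call signature is the one generated from the rpc definition above:

    MasterProtos.GetTableStateRequest request =
        MasterProtos.GetTableStateRequest.newBuilder()
            .setTableName(HBaseProtos.TableName.newBuilder()
                .setNamespace(ByteString.copyFromUtf8("default"))
                .setQualifier(ByteString.copyFromUtf8("t1")))
            .build();
    // Blocking call; throws ServiceException on RPC failure. A null controller
    // mirrors how other master calls in this patch invoke the stub.
    MasterProtos.GetTableStateResponse response = stub.getTableState(null, request);
    HBaseProtos.TableState state = response.getTableState();
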
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 8acd77875bb..c40fa7740ba 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -87,8 +87,9 @@ message SplitLogTask {
 
 /**
  * The znode that holds state of table.
+ * Deprecated: table state is now stored in the table descriptor on HDFS.
  */
-message Table {
+message DeprecatedTableState {
   // Table's current state
   enum State {
     ENABLED = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
index 2642e298d79..1019b2ddf90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/CoordinatedStateManager.java
@@ -55,12 +55,4 @@ public interface CoordinatedStateManager {
    * @return instance of Server coordinated state manager runs within
    */
   Server getServer();
-
-  /**
-   * Returns implementation of TableStateManager.
-   * @throws InterruptedException if operation is interrupted
-   * @throws CoordinatedStateException if error happens in underlying coordination mechanism
-   */
-  TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
new file mode 100644
index 00000000000..bf38ee53613
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+
+/**
+ * Class represents table state on HDFS.
+ */
+@InterfaceAudience.Private
+public class TableDescriptor {
+  private HTableDescriptor hTableDescriptor;
+  private TableState.State tableState;
+
+  /**
+   * Creates TableDescriptor with all fields.
+   * @param hTableDescriptor HTableDescriptor to use
+   * @param tableState table state
+   */
+  public TableDescriptor(HTableDescriptor hTableDescriptor,
+      TableState.State tableState) {
+    this.hTableDescriptor = hTableDescriptor;
+    this.tableState = tableState;
+  }
+
+  /**
+   * Creates TableDescriptor with Enabled table.
+   * @param hTableDescriptor HTableDescriptor to use
+   */
+  @VisibleForTesting
+  public TableDescriptor(HTableDescriptor hTableDescriptor) {
+    this(hTableDescriptor, TableState.State.ENABLED);
+  }
+
+  /**
+   * Associated HTableDescriptor
+   * @return instance of HTableDescriptor
+   */
+  public HTableDescriptor getHTableDescriptor() {
+    return hTableDescriptor;
+  }
+
+  public void setHTableDescriptor(HTableDescriptor hTableDescriptor) {
+    this.hTableDescriptor = hTableDescriptor;
+  }
+
+  public TableState.State getTableState() {
+    return tableState;
+  }
+
+  public void setTableState(TableState.State tableState) {
+    this.tableState = tableState;
+  }
+
+  /**
+   * Convert to PB.
+   */
+  public HBaseProtos.TableDescriptor convert() {
+    return HBaseProtos.TableDescriptor.newBuilder()
+        .setSchema(hTableDescriptor.convert())
+        .setState(tableState.convert())
+        .build();
+  }
+
+  /**
+   * Convert from PB
+   */
+  public static TableDescriptor convert(HBaseProtos.TableDescriptor proto) {
+    HTableDescriptor hTableDescriptor = HTableDescriptor.convert(proto.getSchema());
+    TableState.State state = TableState.State.convert(proto.getState());
+    return new TableDescriptor(hTableDescriptor, state);
+  }
+
+  /**
+   * @return This instance serialized with pb with pb magic prefix
+   * @see #parseFrom(byte[])
+   */
+  public byte [] toByteArray() {
+    return ProtobufUtil.prependPBMagic(convert().toByteArray());
+  }
+
+  /**
+   * @param bytes A pb serialized {@link TableDescriptor} instance with pb magic prefix
+   * @see #toByteArray()
+   */
+  public static TableDescriptor parseFrom(final byte [] bytes)
+      throws DeserializationException, IOException {
+    if (!ProtobufUtil.isPBMagicPrefix(bytes)) {
+      throw new DeserializationException("Expected PB encoded TableDescriptor");
+    }
+    int pblen = ProtobufUtil.lengthOfPBMagic();
+    HBaseProtos.TableDescriptor.Builder builder = HBaseProtos.TableDescriptor.newBuilder();
+    HBaseProtos.TableDescriptor ts;
+    try {
+      ts = builder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+    } catch (InvalidProtocolBufferException e) {
+      throw new DeserializationException(e);
+    }
+    return convert(ts);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+
+    TableDescriptor that = (TableDescriptor) o;
+
+    if (hTableDescriptor != null ?
+        !hTableDescriptor.equals(that.hTableDescriptor) :
+        that.hTableDescriptor != null) return false;
+    if (tableState != that.tableState) return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = hTableDescriptor != null ? hTableDescriptor.hashCode() : 0;
+    result = 31 * result + (tableState != null ? tableState.hashCode() : 0);
+    return result;
+  }
+
+  @Override
+  public String toString() {
+    return "TableDescriptor{" +
+        "hTableDescriptor=" + hTableDescriptor +
+        ", tableState=" + tableState +
+        '}';
+  }
+}
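
A minimal round trip through the new class, using only the constructors and methods defined above; the table name "t1" is illustrative:

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    TableDescriptor td = new TableDescriptor(htd, TableState.State.DISABLED);

    byte[] bytes = td.toByteArray();          // pb-magic-prefixed serialization
    TableDescriptor copy = TableDescriptor.parseFrom(bytes);  // DeserializationException on a bad prefix
    assert td.equals(copy);                   // equals() compares schema and state
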
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
index a0c246b71f7..ff5f0b34454 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * Get, remove and modify table descriptors.
@@ -37,6 +36,14 @@ public interface TableDescriptors {
   HTableDescriptor get(final TableName tableName)
   throws IOException;
 
+  /**
+   * @param tableName
+   * @return TableDescriptor for the given table name
+   * @throws IOException
+   */
+  TableDescriptor getDescriptor(final TableName tableName)
+      throws IOException;
+
   /**
    * Get Map of all NamespaceDescriptors for a given namespace.
    * @return Map of all descriptors.
@@ -54,6 +61,15 @@ public interface TableDescriptors {
  Map<String, HTableDescriptor> getAll()
   throws IOException;
 
+  /**
+   * Get Map of all TableDescriptors. Populates the descriptor cache as a
+   * side effect.
+   * @return Map of all descriptors.
+   * @throws IOException
+   */
+  Map<String, TableDescriptor> getAllDescriptors()
+      throws IOException;
+
   /**
    * Add or update descriptor
    * @param htd Descriptor to set into TableDescriptors
@@ -62,6 +78,14 @@ public interface TableDescriptors {
   void add(final HTableDescriptor htd)
   throws IOException;
 
+  /**
+   * Add or update descriptor
+   * @param htd Descriptor to set into TableDescriptors
+   * @throws IOException
+   */
+  void add(final TableDescriptor htd)
+      throws IOException;
+
   /**
    * @param tablename
    * @return Instance of table descriptor or null if none found.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
deleted file mode 100644
index 56cd4ae7096..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableStateManager.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-
-import java.io.InterruptedIOException;
-import java.util.Set;
-
-/**
- * Helper class for table state management for operations running inside
- * RegionServer or HMaster.
- * Depending on implementation, fetches information from HBase system table,
- * local data store, ZooKeeper ensemble or somewhere else.
- * Code running on client side (with no coordinated state context) shall instead use
- * {@link org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader}
- */
-@InterfaceAudience.Private
-public interface TableStateManager {
-
-  /**
-   * Sets the table into desired state. Fails silently if the table is already in this state.
-   * @param tableName table to process
-   * @param state new state of this table
-   * @throws CoordinatedStateException if error happened when trying to set table state
-   */
-  void setTableState(TableName tableName, ZooKeeperProtos.Table.State state)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is already in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                  ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * Sets the specified table into the newState, but only if the table is NOT in
-   * one of the possibleCurrentStates (otherwise no operation is performed).
-   * @param tableName table to process
-   * @param newState new state for the table
-   * @param states table should NOT be in one of these states for the operation
-   *                              to be performed
-   * @throws CoordinatedStateException if error happened while performing operation
-   * @return true if operation succeeded, false otherwise
-   */
-  boolean setTableStateIfNotInStates(TableName tableName, ZooKeeperProtos.Table.State newState,
-                                     ZooKeeperProtos.Table.State... states)
-    throws CoordinatedStateException;
-
-  /**
-   * @return true if the table is in any one of the listed states, false otherwise.
-   */
-  boolean isTableState(TableName tableName, ZooKeeperProtos.Table.State... states);
-
-  /**
-   * Mark table as deleted.  Fails silently if the table is not currently marked as disabled.
-   * @param tableName table to be deleted
-   * @throws CoordinatedStateException if error happened while performing operation
-   */
-  void setDeletedTable(TableName tableName) throws CoordinatedStateException;
-
-  /**
-   * Checks if table is present.
-   *
-   * @param tableName table we're checking
-   * @return true if the table is present, false otherwise
-   */
-  boolean isTablePresent(TableName tableName);
-
-  /**
-   * @return set of tables which are in any one of the listed states, empty Set if none
-   */
-  Set<TableName> getTablesInStates(ZooKeeperProtos.Table.State... states)
-    throws InterruptedIOException, CoordinatedStateException;
-
-  /**
-   * If the table is found in the given state the in-memory state is removed. This
-   * helps in cases where CreateTable is to be retried by the client in case of
-   * failures.  If deletePermanentState is true - the flag kept permanently is
-   * also reset.
-   *
-   * @param tableName table we're working on
-   * @param states if table isn't in any one of these states, operation aborts
-   * @param deletePermanentState if true, reset the permanent flag
-   * @throws CoordinatedStateException if error happened in underlying coordination engine
-   */
-  void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states,
-                            boolean deletePermanentState)
-    throws CoordinatedStateException;
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
index db853ed5175..6654032fe04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
@@ -118,4 +118,4 @@ public class CoprocessorHConnection extends HConnectionImplementation {
   public NonceGenerator getNonceGenerator() {
     return NO_NONCE_GEN; // don't use nonces for coprocessor connection
   }
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
index 295cefec5ec..cb59dff5526 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/BaseCoordinatedStateManager.java
@@ -18,10 +18,8 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
 
 /**
  * Base class for {@link org.apache.hadoop.hbase.CoordinatedStateManager} implementations.
@@ -49,10 +47,6 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan
     return null;
   }
 
-  @Override
-  public abstract TableStateManager getTableStateManager() throws InterruptedException,
-    CoordinatedStateException;
-
   /**
    * Method to retrieve coordination for split log worker
    */
@@ -61,4 +55,4 @@ public abstract class BaseCoordinatedStateManager implements CoordinatedStateMan
    * Method to retrieve coordination for split log manager
    */
   public abstract SplitLogManagerCoordination getSplitLogManagerCoordination();
-}
\ No newline at end of file
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
index 98500d3733c..cce6091b365 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkCoordinatedStateManager.java
@@ -18,14 +18,9 @@
 package org.apache.hadoop.hbase.coordination;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateManager;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * ZooKeeper-based implementation of {@link org.apache.hadoop.hbase.CoordinatedStateManager}.
@@ -51,16 +46,6 @@ public class ZkCoordinatedStateManager extends BaseCoordinatedStateManager {
     return server;
   }
 
-  @Override
-  public TableStateManager getTableStateManager() throws InterruptedException,
-      CoordinatedStateException {
-    try {
-      return new ZKTableStateManager(server.getZooKeeper());
-    } catch (KeeperException e) {
-      throw new CoordinatedStateException(e);
-    }
-  }
-
   @Override
   public SplitLogWorkerCoordination getSplitLogWorkerCoordination() {
     return splitLogWorkerCoordination;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 5b9ff363a75..7e0a5e29803 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -47,7 +46,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -60,9 +58,9 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.executor.ExecutorService;
@@ -76,8 +74,6 @@ import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
 import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -207,14 +203,14 @@ public class AssignmentManager {
    * @param service Executor service
    * @param metricsMaster metrics manager
    * @param tableLockManager TableLock manager
-   * @throws CoordinatedStateException
    * @throws IOException
    */
   public AssignmentManager(Server server, ServerManager serverManager,
       final LoadBalancer balancer,
       final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager)
-          throws IOException, CoordinatedStateException {
+      final TableLockManager tableLockManager,
+      final TableStateManager tableStateManager)
+          throws IOException {
     this.server = server;
     this.serverManager = serverManager;
     this.executorService = service;
@@ -226,15 +222,9 @@ public class AssignmentManager {
     this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
            HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
            FavoredNodeLoadBalancer.class);
-    try {
-      if (server.getCoordinatedStateManager() != null) {
-        this.tableStateManager = server.getCoordinatedStateManager().getTableStateManager();
-      } else {
-        this.tableStateManager = null;
-      }
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException();
-    }
+
+    this.tableStateManager = tableStateManager;
+
     // This is the max attempts, not retries, so it should be at least 1.
     this.maximumAttempts = Math.max(1,
       this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
@@ -392,10 +382,9 @@ public class AssignmentManager {
    * @throws IOException
    * @throws KeeperException
    * @throws InterruptedException
-   * @throws CoordinatedStateException
    */
   void joinCluster() throws IOException,
-      KeeperException, InterruptedException, CoordinatedStateException {
+          KeeperException, InterruptedException {
     long startTime = System.currentTimeMillis();
     // Concurrency note: In the below the accesses on regionsInTransition are
     // outside of a synchronization block where usually all accesses to RIT are
@@ -430,10 +419,9 @@ public class AssignmentManager {
    *          Map of dead servers and their regions. Can be null.
    * @throws IOException
    * @throws InterruptedException
-   * @throws CoordinatedStateException
    */
   boolean processDeadServersAndRegionsInTransition(final Set<ServerName> deadServers)
-      throws IOException, InterruptedException, CoordinatedStateException {
+          throws IOException, InterruptedException {
     boolean failover = !serverManager.getDeadServers().isEmpty();
     if (failover) {
       // This may not be a failover actually, especially if meta is on this master.
@@ -502,8 +490,8 @@ public class AssignmentManager {
 
     if (!failover) {
       disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING,
-        ZooKeeperProtos.Table.State.ENABLING);
+        TableState.State.DISABLED, TableState.State.DISABLING,
+        TableState.State.ENABLING);
 
       // Clean re/start, mark all user regions closed before reassignment
       allRegions = regionStates.closeAllUserRegions(
@@ -1011,7 +999,7 @@ public class AssignmentManager {
         // will not be in ENABLING or ENABLED state.
         TableName tableName = region.getTable();
         if (!tableStateManager.isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED, ZooKeeperProtos.Table.State.ENABLING)) {
+          TableState.State.ENABLED, TableState.State.ENABLING)) {
           LOG.debug("Setting table " + tableName + " to ENABLED state.");
           setEnabledTable(tableName);
         }
@@ -1147,8 +1135,8 @@ public class AssignmentManager {
 
   private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
     if (this.tableStateManager.isTableState(region.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED,
-        ZooKeeperProtos.Table.State.DISABLING) || replicasToClose.contains(region)) {
+            TableState.State.DISABLED,
+            TableState.State.DISABLING) || replicasToClose.contains(region)) {
       LOG.info("Table " + region.getTable() + " is disabled or disabling;"
         + " skipping assign of " + region.getRegionNameAsString());
       offlineDisabledRegion(region);
@@ -1480,7 +1468,7 @@ public class AssignmentManager {
     for (HRegionInfo hri : regionsFromMetaScan) {
       TableName tableName = hri.getTable();
       if (!tableStateManager.isTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED)) {
+              TableState.State.ENABLED)) {
         setEnabledTable(tableName);
       }
     }
@@ -1525,14 +1513,14 @@ public class AssignmentManager {
    * @throws IOException
    */
   Set<ServerName> rebuildUserRegions() throws
-      IOException, KeeperException, CoordinatedStateException {
+          IOException, KeeperException {
     Set<TableName> disabledOrEnablingTables = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED, TableState.State.ENABLING);
 
     Set<TableName> disabledOrDisablingOrEnabling = tableStateManager.getTablesInStates(
-      ZooKeeperProtos.Table.State.DISABLED,
-      ZooKeeperProtos.Table.State.DISABLING,
-      ZooKeeperProtos.Table.State.ENABLING);
+            TableState.State.DISABLED,
+            TableState.State.DISABLING,
+            TableState.State.ENABLING);
 
     // Region assignment from META
     List<Result> results = MetaTableAccessor.fullScanOfMeta(server.getShortCircuitConnection());
@@ -1597,7 +1585,7 @@ public class AssignmentManager {
         // this will be used in rolling restarts
         if (!disabledOrDisablingOrEnabling.contains(tableName)
           && !getTableStateManager().isTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLED)) {
+                TableState.State.ENABLED)) {
           setEnabledTable(tableName);
         }
       }
@@ -1614,9 +1602,9 @@ public class AssignmentManager {
    * @throws IOException
    */
   private void recoverTableInDisablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> disablingTables =
-      tableStateManager.getTablesInStates(ZooKeeperProtos.Table.State.DISABLING);
+            tableStateManager.getTablesInStates(TableState.State.DISABLING);
     if (disablingTables.size() != 0) {
       for (TableName tableName : disablingTables) {
         // Recover by calling DisableTableHandler
@@ -1638,9 +1626,9 @@ public class AssignmentManager {
    * @throws IOException
    */
   private void recoverTableInEnablingState()
-      throws KeeperException, IOException, CoordinatedStateException {
+          throws KeeperException, IOException {
     Set<TableName> enablingTables = tableStateManager.
-      getTablesInStates(ZooKeeperProtos.Table.State.ENABLING);
+            getTablesInStates(TableState.State.ENABLING);
     if (enablingTables.size() != 0) {
       for (TableName tableName : enablingTables) {
         // Recover by calling EnableTableHandler
@@ -1675,7 +1663,7 @@ public class AssignmentManager {
       if (!serverManager.isServerOnline(regionState.getServerName())) {
         continue; // SSH will handle it
       }
-      State state = regionState.getState();
+      RegionState.State state = regionState.getState();
       LOG.info("Processing " + regionState);
       switch (state) {
       case CLOSED:
@@ -1809,7 +1797,7 @@ public class AssignmentManager {
                   } catch (InterruptedException ie) {
                     LOG.warn("Failed to unassign "
                       + hri.getRegionNameAsString() + " since interrupted", ie);
-                    regionStates.updateRegionState(hri, State.FAILED_CLOSE);
+                    regionStates.updateRegionState(hri, RegionState.State.FAILED_CLOSE);
                     Thread.currentThread().interrupt();
                     return;
                   }
@@ -1987,7 +1975,7 @@ public class AssignmentManager {
           it.remove();
         } else {
           if (tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+                  TableState.State.DISABLED, TableState.State.DISABLING)) {
             regionStates.regionOffline(hri);
             it.remove();
             continue;
@@ -2009,7 +1997,7 @@ public class AssignmentManager {
     HRegionInfo hri = plan.getRegionInfo();
     TableName tableName = hri.getTable();
     if (tableStateManager.isTableState(tableName,
-      ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
       LOG.info("Ignored moving region of disabling/disabled table "
         + tableName);
       return;
@@ -2043,8 +2031,8 @@ public class AssignmentManager {
   protected void setEnabledTable(TableName tableName) {
     try {
       this.tableStateManager.setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
+              TableState.State.ENABLED);
+    } catch (IOException e) {
       // here we can abort as it is the start up flow
       String errorMsg = "Unable to ensure that the table " + tableName
           + " will be" + " enabled because of a ZooKeeper issue";
@@ -2087,8 +2075,9 @@ public class AssignmentManager {
       if (regionState != null) {
         // When there are more than one region server a new RS is selected as the
         // destination and the same is updated in the region plan. (HBASE-5546)
-        if (getTableStateManager().isTableState(hri.getTable(), Table.State.DISABLED,
-            Table.State.DISABLING) || replicasToClose.contains(hri)) {
+        if (getTableStateManager().isTableState(hri.getTable(),
+                TableState.State.DISABLED, TableState.State.DISABLING) ||
+                replicasToClose.contains(hri)) {
           offlineDisabledRegion(hri);
           return null;
         }
@@ -2131,7 +2120,7 @@ public class AssignmentManager {
     // reset the count, if any
     failedOpenTracker.remove(hri.getEncodedName());
     if (getTableStateManager().isTableState(hri.getTable(),
-        Table.State.DISABLED, Table.State.DISABLING)) {
+            TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     }
     return null;
@@ -2149,8 +2138,8 @@ public class AssignmentManager {
     if (current == null || !current.isOnServer(serverName) || !current.isClosing()) {
       return hri.getShortNameToLog() + " is not closing on " + serverName;
     }
-    if (getTableStateManager().isTableState(hri.getTable(), Table.State.DISABLED,
-        Table.State.DISABLING) || replicasToClose.contains(hri)) {
+    if (getTableStateManager().isTableState(hri.getTable(), TableState.State.DISABLED,
+        TableState.State.DISABLING) || replicasToClose.contains(hri)) {
       offlineDisabledRegion(hri);
       return null;
     }
@@ -2263,7 +2252,7 @@ public class AssignmentManager {
 
     // User could disable the table before master knows the new region.
     if (getTableStateManager().isTableState(hri.getTable(),
-        Table.State.DISABLED, Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     } else {
@@ -2308,7 +2297,7 @@ public class AssignmentManager {
     regionOffline(a);
     regionOffline(b);
     if (getTableStateManager().isTableState(hri.getTable(),
-        Table.State.DISABLED, Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     }
     return null;
@@ -2419,7 +2408,7 @@ public class AssignmentManager {
 
     // User could disable the table before master knows the new region.
     if (getTableStateManager().isTableState(hri.getTable(),
-        Table.State.DISABLED, Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(hri);
     } else {
       Callable<Object> mergeReplicasCallable = new Callable<Object>() {
@@ -2464,7 +2453,7 @@ public class AssignmentManager {
     regionOffline(hri);
 
     if (getTableStateManager().isTableState(hri.getTable(),
-        Table.State.DISABLED, Table.State.DISABLING)) {
+        TableState.State.DISABLED, TableState.State.DISABLING)) {
       invokeUnAssign(a);
       invokeUnAssign(b);
     }
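
With the constructor change above, the AssignmentManager receives its table state source by injection instead of pulling one from the coordination layer. A hedged sketch of the expected wiring, mirroring the HMaster change later in this patch; the server, serverManager, balancer, executorService, metricsMaster, tableLockManager, and masterServices variables are assumed to exist in the enclosing master setup code:

    TableStateManager tsm = new TableStateManager(masterServices);  // master-side manager
    tsm.start();                                                    // load states from tableinfo files
    AssignmentManager am = new AssignmentManager(server, serverManager,
        balancer, executorService, metricsMaster, tableLockManager, tsm);
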
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 714b5a8aede..dfde451c663 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.executor.ExecutorType;
@@ -100,7 +101,6 @@ import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionServerInfo;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
@@ -114,6 +114,7 @@ import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.hadoop.hbase.util.ZKDataMigrator;
 import org.apache.hadoop.hbase.zookeeper.DrainingServerTracker;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
@@ -224,6 +225,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   // monitor for distributed procedures
   MasterProcedureManagerHost mpmHost;
 
+  // handle table states
+  private TableStateManager tableStateManager;
+
   /** flag used in test cases in order to simulate RS failures during master initialization */
   private volatile boolean initializationBeforeMetaAssignment = false;
 
@@ -409,7 +413,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.loadBalancerTracker.start();
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager);
+      this.tableLockManager, tableStateManager);
 
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
         this.serverManager);
@@ -436,6 +440,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
+
+    // migrate any existing table state from ZK to the new representation
+    for (Map.Entry<TableName, TableState.State> entry : ZKDataMigrator
+        .queryForTableStates(getZooKeeper()).entrySet()) {
+      LOG.info("Converting state from zk to new states: " + entry);
+      tableStateManager.setTableState(entry.getKey(), entry.getValue());
+    }
+    ZKUtil.deleteChildrenRecursively(getZooKeeper(), getZooKeeper().tableZNode);
   }
 
   /**
@@ -490,6 +502,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // Invalidate all write locks held previously
     this.tableLockManager.reapWriteLocks();
 
+    this.tableStateManager = new TableStateManager(this);
+    this.tableStateManager.start();
+
     status.setStatus("Initializing ZK system trackers");
     initializeZKBasedSystemTrackers();
 
@@ -737,8 +752,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   }
 
   private void enableMeta(TableName metaTableName) {
-    if (!this.assignmentManager.getTableStateManager().isTableState(metaTableName,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+    if (!this.tableStateManager.isTableState(metaTableName,
+            TableState.State.ENABLED)) {
       this.assignmentManager.setEnabledTable(metaTableName);
     }
   }
@@ -777,6 +792,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     return this.fileSystemManager;
   }
 
+  @Override
+  public TableStateManager getTableStateManager() {
+    return tableStateManager;
+  }
+
   /*
    * Start up all services. If any of these threads gets an unhandled exception
    * then they just die with a logged message.  This should be fine because
@@ -1452,7 +1472,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       throw new TableNotFoundException(tableName);
     }
     if (!getAssignmentManager().getTableStateManager().
-        isTableState(tableName, ZooKeeperProtos.Table.State.DISABLED)) {
+        isTableState(tableName, TableState.State.DISABLED)) {
       throw new TableNotDisabledException(tableName);
     }
   }
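
The master now answers table state questions from its own TableStateManager, whose definition appears later in this patch. A hedged usage sketch of the new API; master and tableName are assumed to be in scope:

    TableStateManager tsm = master.getTableStateManager();

    // Conditional transition: move to ENABLING only if currently DISABLED.
    boolean flipped = tsm.setTableStateIfInStates(tableName,
        TableState.State.ENABLING, TableState.State.DISABLED);

    // Point query, as used by enableMeta() and checkTableModifiable() above.
    if (tsm.isTableState(tableName, TableState.State.ENABLED)) {
      // table is online
    }
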
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index be702ab7cc9..49225c4fd60 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -43,8 +43,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
@@ -454,7 +456,9 @@ public class MasterFileSystem {
     }
 
     // Create tableinfo-s for hbase:meta if not already there.
-    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    // assume the created table descriptor is for an enabling table
+    new FSTableDescriptors(fs, rd).createTableDescriptor(
+        new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLING));
 
     return rd;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index bb4a09c517a..54f8777a609 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -37,8 +37,10 @@ import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
@@ -848,7 +850,7 @@ public class MasterRpcServices extends RSRpcServices
   public GetTableNamesResponse getTableNames(RpcController controller,
       GetTableNamesRequest req) throws ServiceException {
     try {
-      master.checkInitialized();
+      master.checkServiceStarted();
       Collection<HTableDescriptor> descriptors = master.getTableDescriptors().getAll().values();
       GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
       for (HTableDescriptor descriptor: descriptors) {
@@ -863,6 +865,25 @@ public class MasterRpcServices extends RSRpcServices
     }
   }
 
+  @Override
+  public MasterProtos.GetTableStateResponse getTableState(RpcController controller,
+      MasterProtos.GetTableStateRequest request) throws ServiceException {
+    try {
+      master.checkServiceStarted();
+      TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+      TableState.State state = master.getTableStateManager()
+              .getTableState(tableName);
+      if (state == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      MasterProtos.GetTableStateResponse.Builder builder =
+              MasterProtos.GetTableStateResponse.newBuilder();
+      builder.setTableState(new TableState(tableName, state).convert());
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(RpcController c,
       IsCatalogJanitorEnabledRequest req) throws ServiceException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index c1334f5f1d5..213f7f8dd84 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -65,6 +65,11 @@ public interface MasterServices extends Server {
    */
   TableLockManager getTableLockManager();
 
+  /**
+   * @return Master's instance of {@link TableStateManager}
+   */
+  TableStateManager getTableStateManager();
+
   /**
    * @return Master's instance of {@link MasterCoprocessorHost}
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 6a0882cf7b9..38e972f3fed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -29,6 +29,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -40,16 +42,12 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
 /**
  * Region state accountant. It holds the states of all regions in the memory.
  * In normal scenario, it should match the meta table and the true region states.
@@ -509,7 +507,7 @@ public class RegionStates {
       if (oldServerName != null && serverHoldings.containsKey(oldServerName)
           && (newState == State.MERGED || newState == State.SPLIT
             || hri.isMetaRegion() || tableStateManager.isTableState(hri.getTable(),
-              ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING))) {
+              TableState.State.DISABLED, TableState.State.DISABLING))) {
         // Offline the region only if it's merged/split, or the table is disabled/disabling.
         // Otherwise, offline it from this server only when it is online on a different server.
         LOG.info("Offlined " + hri.getShortNameToLog() + " from " + oldServerName);
@@ -937,8 +935,8 @@ public class RegionStates {
    * Update a region state. It will be put in transition if not already there.
    */
   private RegionState updateRegionState(final HRegionInfo hri,
-      final State state, final ServerName serverName, long openSeqNum) {
-    if (state == State.FAILED_CLOSE || state == State.FAILED_OPEN) {
+      final RegionState.State state, final ServerName serverName, long openSeqNum) {
+    if (state == RegionState.State.FAILED_CLOSE || state == RegionState.State.FAILED_OPEN) {
       LOG.warn("Failed to open/close " + hri.getShortNameToLog()
         + " on " + serverName + ", set to " + state);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index a78e225c586..8ad0d1ae498 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -258,7 +259,7 @@ public class TableNamespaceManager {
     }
 
     // Now check if the table is assigned, if not then fail fast
-    if (isTableAssigned()) {
+    if (isTableAssigned() && isTableEnabled()) {
       try {
         nsTable = new HTable(conf, TableName.NAMESPACE_TABLE_NAME);
         zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
@@ -298,6 +299,12 @@ public class TableNamespaceManager {
     return false;
   }
 
+  private boolean isTableEnabled() throws IOException {
+    // enum comparison; also avoids an NPE if the state is unknown
+    return masterServices.getTableStateManager()
+        .getTableState(TableName.NAMESPACE_TABLE_NAME) == TableState.State.ENABLED;
+  }
+
   private boolean isTableAssigned() {
     return !masterServices.getAssignmentManager()
         .getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME).isEmpty();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
new file mode 100644
index 00000000000..26b1901805d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableStateManager.java
@@ -0,0 +1,217 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.TableState;
+
+/**
+ * This is a helper class used to manage table states.
+ * States are persisted in the tableinfo descriptor file and cached internally.
+ */
+@InterfaceAudience.Private
+public class TableStateManager {
+  private static final Log LOG = LogFactory.getLog(TableStateManager.class);
+  private final TableDescriptors descriptors;
+
+  private final Map<TableName, TableState.State> tableStates = Maps.newConcurrentMap();
+
+  public TableStateManager(MasterServices master) {
+    this.descriptors = master.getTableDescriptors();
+  }
+
+  public void start() throws IOException {
+    Map<String, TableDescriptor> all = descriptors.getAllDescriptors();
+    for (TableDescriptor table : all.values()) {
+      TableName tableName = table.getHTableDescriptor().getTableName();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Adding table state: " + tableName
+            + ": " + table.getTableState());
+      }
+      tableStates.put(tableName, table.getTableState());
+    }
+  }
+
+  /**
+   * Set table state to the provided state.
+   * Caller should lock table on write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @throws IOException
+   */
+  public void setTableState(TableName tableName, TableState.State newState) throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (descriptor.getTableState() != newState) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+      }
+    }
+  }
+
+  /**
+   * Set table state to the provided state, but only if the table is currently in one of the specified states.
+   * Caller should lock table on write.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @throws IOException
+   */
+  public boolean setTableStateIfInStates(TableName tableName,
+                                         TableState.State newState,
+                                         TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+
+  /**
+   * Set the table state to the provided state, but only if the table
+   * is currently in none of the specified states.
+   * The caller should hold the table write lock.
+   * @param tableName table to change state for
+   * @param newState new state
+   * @param states states to check against
+   * @return true if the state was changed, false otherwise
+   * @throws IOException if the new state cannot be persisted
+   */
+  public boolean setTableStateIfNotInStates(TableName tableName,
+                                            TableState.State newState,
+                                            TableState.State... states)
+          throws IOException {
+    synchronized (tableStates) {
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor == null) {
+        throw new TableNotFoundException(tableName);
+      }
+      if (!TableState.isInStates(descriptor.getTableState(), states)) {
+        writeDescriptor(
+            new TableDescriptor(descriptor.getHTableDescriptor(), newState));
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  public boolean isTableState(TableName tableName, TableState.State... states) {
+    TableState.State tableState = null;
+    try {
+      tableState = getTableState(tableName);
+    } catch (IOException e) {
+      LOG.error("Unable to get table state, probably table not exists");
+      return false;
+    }
+    return tableState != null && TableState.isInStates(tableState, states);
+  }
+
+  public void setDeletedTable(TableName tableName) throws IOException {
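+    // Only drops the cached entry; the tableinfo file itself is removed by
+    // the delete-table flow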
+    TableState.State remove = tableStates.remove(tableName);
+    if (remove == null) {
+      LOG.warn("Moving table " + tableName + " state to deleted but was " +
+              "already deleted");
+    }
+  }
+
+  public boolean isTablePresent(TableName tableName) throws IOException {
+    return getTableState(tableName) != null;
+  }
+
+  /**
+   * Return all tables in given states.
+   *
+   * @param states filter by states
+   * @return tables in given states
+   * @throws IOException
+   */
+  public Set<TableName> getTablesInStates(TableState.State... states) throws IOException {
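+    // Served from the in-memory cache; entries are loaded in start() and
+    // kept current by readDescriptor/writeDescriptor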
+    Set<TableName> rv = Sets.newHashSet();
+    for (Map.Entry<TableName, TableState.State> entry : tableStates.entrySet()) {
+      if (TableState.isInStates(entry.getValue(), states))
+        rv.add(entry.getKey());
+    }
+    return rv;
+  }
+
+  public TableState.State getTableState(TableName tableName) throws IOException {
+    TableState.State tableState = tableStates.get(tableName);
+    if (tableState == null) {
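+      // Cache miss: fall back to reading the descriptor, which also refreshes the cache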
+      TableDescriptor descriptor = readDescriptor(tableName);
+      if (descriptor != null)
+        tableState = descriptor.getTableState();
+    }
+    return tableState;
+  }
+
+  /**
+   * Write descriptor in place, update cache of states.
+   * The write lock should be held by the caller.
+   *
+   * @param descriptor what to write
+   */
+  private void writeDescriptor(TableDescriptor descriptor) throws IOException {
+    TableName tableName = descriptor.getHTableDescriptor().getTableName();
+    TableState.State state = descriptor.getTableState();
+    descriptors.add(descriptor);
+    LOG.debug("Table " + tableName + " written descriptor for state " + state);
+    tableStates.put(tableName, state);
+    LOG.debug("Table " + tableName + " updated state to " + state);
+  }
+
+  /**
+   * Read current descriptor for table, update cache of states.
+   *
+   * @param tableName table for which the descriptor is read
+   * @return descriptor, or null if the table is not found
+   * @throws IOException
+   */
+  private TableDescriptor readDescriptor(TableName tableName) throws IOException {
+    TableDescriptor descriptor = descriptors.getDescriptor(tableName);
+    if (descriptor == null)
+      tableStates.remove(tableName);
+    else
+      tableStates.put(tableName, descriptor.getTableState());
+    return descriptor;
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index 3a86128063d..e584008f0b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -31,14 +31,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableExistsException;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.ipc.RequestContext;
@@ -49,7 +51,6 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -121,8 +122,6 @@ public class CreateTableHandler extends EventHandler {
       if (MetaTableAccessor.tableExists(this.server.getShortCircuitConnection(), tableName)) {
         throw new TableExistsException(tableName);
       }
-
-      checkAndSetEnablingTable(assignmentManager, tableName);
       success = true;
     } finally {
       if (!success) {
@@ -132,47 +131,6 @@ public class CreateTableHandler extends EventHandler {
     return this;
   }
 
-  static void checkAndSetEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) throws IOException {
-    // If we have multiple client threads trying to create the table at the
-    // same time, given the async nature of the operation, the table
-    // could be in a state where hbase:meta table hasn't been updated yet in
-    // the process() function.
-    // Use enabling state to tell if there is already a request for the same
-    // table in progress. This will introduce a new zookeeper call. Given
-    // createTable isn't a frequent operation, that should be ok.
-    // TODO: now that we have table locks, re-evaluate above -- table locks are not enough.
-    // We could have cleared the hbase.rootdir and not zk.  How can we detect this case?
-    // Having to clean zk AND hdfs is awkward.
-    try {
-      if (!assignmentManager.getTableStateManager().setTableStateIfNotInStates(tableName,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLING,
-        ZooKeeperProtos.Table.State.ENABLED)) {
-        throw new TableExistsException(tableName);
-      }
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that the table will be" +
-        " enabling because of a ZooKeeper issue", e);
-    }
-  }
-
-  static void removeEnablingTable(final AssignmentManager assignmentManager,
-      final TableName tableName) {
-    // Try deleting the enabling node in case of error
-    // If this does not happen then if the client tries to create the table
-    // again with the same Active master
-    // It will block the creation saying TableAlreadyExists.
-    try {
-      assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLING, false);
-    } catch (CoordinatedStateException e) {
-      // Keeper exception should not happen here
-      LOG.error("Got a keeper exception while removing the ENABLING table znode "
-          + tableName, e);
-    }
-  }
-
   @Override
   public String toString() {
     String name = "UnknownServerName";
@@ -218,9 +176,6 @@ public class CreateTableHandler extends EventHandler {
     releaseTableLock();
     LOG.info("Table, " + this.hTableDescriptor.getTableName() + ", creation " +
         (exception == null ? "successful" : "failed. " + exception));
-    if (exception != null) {
-      removeEnablingTable(this.assignmentManager, this.hTableDescriptor.getTableName());
-    }
   }
 
   /**
@@ -243,9 +198,12 @@ public class CreateTableHandler extends EventHandler {
     FileSystem fs = fileSystemManager.getFileSystem();
 
     // 1. Create Table Descriptor
+    // Using a copy of the descriptor: the table is first created in ENABLING state
+    TableDescriptor underConstruction = new TableDescriptor(
+        this.hTableDescriptor, TableState.State.ENABLING);
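+    // It is flipped to ENABLED in step 6 below, once regions are created and assigned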
     Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
     new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-      tempTableDir, this.hTableDescriptor, false);
+      tempTableDir, underConstruction, false);
     Path tableDir = FSUtils.getTableDir(fileSystemManager.getRootDir(), tableName);
 
     // 2. Create Regions
@@ -271,20 +229,15 @@ public class CreateTableHandler extends EventHandler {
       ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
     }
 
-    // 8. Set table enabled flag up in zk.
-    try {
-      assignmentManager.getTableStateManager().setTableState(tableName,
-        ZooKeeperProtos.Table.State.ENABLED);
-    } catch (CoordinatedStateException e) {
-      throw new IOException("Unable to ensure that " + tableName + " will be" +
-        " enabled because of a ZooKeeper issue", e);
-    }
+    // 6. Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+            TableState.State.ENABLED);
   }
 
   /**
    * Create any replicas for the regions (the default replicas that was
    * already created is passed to the method)
-   * @param hTableDescriptor
+   * @param hTableDescriptor descriptor to use
    * @param regions default replicas
    * @return the combined list of default and non-default replicas
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
index 730da734e92..58be7282237 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -58,7 +59,7 @@ public class DeleteTableHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     // The next call fails if no such table.
-    hTableDescriptor = getTableDescriptor();
+    hTableDescriptor = getTableDescriptor().getHTableDescriptor();
   }
 
  protected void waitRegionInTransition(final List<HRegionInfo> regions)
@@ -102,62 +103,66 @@ public class DeleteTableHandler extends TableEventHandler {
     // 1. Wait because of region in transition
     waitRegionInTransition(regions);
 
-    try {
       // 2. Remove table from hbase:meta and HDFS
-      removeTableData(regions);
-    } finally {
-      // 3. Update table descriptor cache
-      LOG.debug("Removing '" + tableName + "' descriptor.");
-      this.masterServices.getTableDescriptors().remove(tableName);
-
-      AssignmentManager am = this.masterServices.getAssignmentManager();
-
-      // 4. Clean up regions of the table in RegionStates.
-      LOG.debug("Removing '" + tableName + "' from region states.");
-      am.getRegionStates().tableDeleted(tableName);
-
-      // 5. If entry for this table in zk, and up in AssignmentManager, remove it.
-      LOG.debug("Marking '" + tableName + "' as deleted.");
-      am.getTableStateManager().setDeletedTable(tableName);
-    }
+    removeTableData(regions);
 
     if (cpHost != null) {
       cpHost.postDeleteTableHandler(this.tableName);
     }
   }
 
+  private void cleanupTableState() throws IOException {
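+    // Invoked from the finally block in removeTableData, so the cached state
+    // is cleared even if the META/FS cleanup fails part-way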
+    // 3. Update table descriptor cache
+    LOG.debug("Removing '" + tableName + "' descriptor.");
+    this.masterServices.getTableDescriptors().remove(tableName);
+
+    AssignmentManager am = this.masterServices.getAssignmentManager();
+
+    // 4. Clean up regions of the table in RegionStates.
+    LOG.debug("Removing '" + tableName + "' from region states.");
+    am.getRegionStates().tableDeleted(tableName);
+
+    // 5. If there is an entry for this table in the table states, remove it.
+    LOG.debug("Marking '" + tableName + "' as deleted.");
+    am.getTableStateManager().setDeletedTable(tableName);
+  }
+
   /**
    * Removes the table from hbase:meta and archives the HDFS files.
    */
   protected void removeTableData(final List<HRegionInfo> regions)
       throws IOException, CoordinatedStateException {
-    // 1. Remove regions from META
-    LOG.debug("Deleting regions from META");
-    MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);
+    try {
+      // 1. Remove regions from META
+      LOG.debug("Deleting regions from META");
+      MetaTableAccessor.deleteRegions(this.server.getShortCircuitConnection(), regions);
 
-    // -----------------------------------------------------------------------
-    // NOTE: At this point we still have data on disk, but nothing in hbase:meta
-    //       if the rename below fails, hbck will report an inconsistency.
-    // -----------------------------------------------------------------------
+      // -----------------------------------------------------------------------
+      // NOTE: At this point we still have data on disk, but nothing in hbase:meta
+      //       if the rename below fails, hbck will report an inconsistency.
+      // -----------------------------------------------------------------------
 
-    // 2. Move the table in /hbase/.tmp
-    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
-    Path tempTableDir = mfs.moveTableToTemp(tableName);
+      // 2. Move the table in /hbase/.tmp
+      MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+      Path tempTableDir = mfs.moveTableToTemp(tableName);
 
-    // 3. Archive regions from FS (temp directory)
-    FileSystem fs = mfs.getFileSystem();
-    for (HRegionInfo hri: regions) {
-      LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
-      HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
-          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+      // 3. Archive regions from FS (temp directory)
+      FileSystem fs = mfs.getFileSystem();
+      for (HRegionInfo hri : regions) {
+        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
+        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
+            tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+      }
+
+      // 4. Delete table directory from FS (temp directory)
+      if (!fs.delete(tempTableDir, true)) {
+        LOG.error("Couldn't delete " + tempTableDir);
+      }
+
+      LOG.debug("Table '" + tableName + "' archived!");
+    } finally {
+      cleanupTableState();
     }
-
-    // 4. Delete table directory from FS (temp directory)
-    if (!fs.delete(tempTableDir, true)) {
-      LOG.error("Couldn't delete " + tempTableDir);
-    }
-
-    LOG.debug("Table '" + tableName + "' archived!");
   }
 
   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index fb7aec8811c..07843a73203 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -25,13 +25,13 @@ import java.util.concurrent.ExecutorService;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
@@ -39,11 +39,10 @@ import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.BulkAssigner;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.htrace.Trace;
 
 /**
@@ -91,16 +90,11 @@ public class DisableTableHandler extends EventHandler {
       // DISABLED or ENABLED.
       //TODO: reevaluate this since we have table locks now
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-            this.tableName, ZooKeeperProtos.Table.State.DISABLING,
-            ZooKeeperProtos.Table.State.ENABLED)) {
-            LOG.info("Table " + tableName + " isn't enabled; skipping disable");
-            throw new TableNotEnabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " disabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+          this.tableName, TableState.State.DISABLING,
+          TableState.State.ENABLED)) {
+          LOG.info("Table " + tableName + " isn't enabled; skipping disable");
+          throw new TableNotEnabledException(this.tableName);
         }
       }
       success = true;
@@ -138,8 +132,6 @@ public class DisableTableHandler extends EventHandler {
       }
     } catch (IOException e) {
       LOG.error("Error trying to disable table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to disable table " + this.tableName, e);
     } finally {
       releaseTableLock();
     }
@@ -155,10 +147,10 @@ public class DisableTableHandler extends EventHandler {
     }
   }
 
-  private void handleDisableTable() throws IOException, CoordinatedStateException {
+  private void handleDisableTable() throws IOException {
     // Set table disabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     boolean done = false;
     while (true) {
       // Get list of online regions that are of this table.  Regions that are
@@ -187,7 +179,7 @@ public class DisableTableHandler extends EventHandler {
     }
     // Flip the table to disabled if success.
     if (done) this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     LOG.info("Disabled table, " + this.tableName + ", is done=" + done);
   }
 
@@ -207,7 +199,7 @@ public class DisableTableHandler extends EventHandler {
       RegionStates regionStates = assignmentManager.getRegionStates();
       for (HRegionInfo region: regions) {
         if (regionStates.isRegionInTransition(region)
-            && !regionStates.isRegionInState(region, State.FAILED_CLOSE)) {
+            && !regionStates.isRegionInState(region, RegionState.State.FAILED_CLOSE)) {
           continue;
         }
         final HRegionInfo hri = region;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
index b8edc0be1b5..5f313898b9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
@@ -27,7 +27,6 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -95,16 +94,8 @@ public class EnableTableHandler extends EventHandler {
         // retainAssignment is true only during recovery.  In normal case it is false
         if (!this.skipTableStateCheck) {
           throw new TableNotFoundException(tableName);
-        } 
-        try {
-          this.assignmentManager.getTableStateManager().checkAndRemoveTableState(tableName,
-            ZooKeeperProtos.Table.State.ENABLING, true);
-          throw new TableNotFoundException(tableName);
-        } catch (CoordinatedStateException e) {
-          // TODO : Use HBCK to clear such nodes
-          LOG.warn("Failed to delete the ENABLING node for the table " + tableName
-              + ".  The table will remain unusable. Run HBCK to manually fix the problem.");
         }
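+        // Clear any cached state for the missing table; this replaces the old
+        // removal of the ENABLING znode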
+        this.assignmentManager.getTableStateManager().setDeletedTable(tableName);
       }
 
       // There could be multiple client requests trying to disable or enable
@@ -112,16 +103,11 @@ public class EnableTableHandler extends EventHandler {
       // After that, no other requests can be accepted until the table reaches
       // DISABLED or ENABLED.
       if (!skipTableStateCheck) {
-        try {
-          if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
-              this.tableName, ZooKeeperProtos.Table.State.ENABLING,
-              ZooKeeperProtos.Table.State.DISABLED)) {
-            LOG.info("Table " + tableName + " isn't disabled; skipping enable");
-            throw new TableNotDisabledException(this.tableName);
-          }
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Unable to ensure that the table will be" +
-            " enabling because of a coordination engine issue", e);
+        if (!this.assignmentManager.getTableStateManager().setTableStateIfInStates(
+            this.tableName, TableState.State.ENABLING,
+            TableState.State.DISABLED)) {
+          LOG.info("Table " + tableName + " isn't disabled; skipping enable");
+          throw new TableNotDisabledException(this.tableName);
         }
       }
       success = true;
@@ -156,11 +142,7 @@ public class EnableTableHandler extends EventHandler {
       if (cpHost != null) {
         cpHost.postEnableTableHandler(this.tableName);
       }
-    } catch (IOException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (CoordinatedStateException e) {
-      LOG.error("Error trying to enable the table " + this.tableName, e);
-    } catch (InterruptedException e) {
+    } catch (IOException | InterruptedException e) {
       LOG.error("Error trying to enable the table " + this.tableName, e);
     } finally {
       releaseTableLock();
@@ -177,14 +159,13 @@ public class EnableTableHandler extends EventHandler {
     }
   }
 
-  private void handleEnableTable() throws IOException, CoordinatedStateException,
+  private void handleEnableTable() throws IOException,
       InterruptedException {
     // I could check table is disabling and if so, not enable but require
     // that user first finish disabling but that might be obnoxious.
 
-    // Set table enabling flag up in zk.
     this.assignmentManager.getTableStateManager().setTableState(this.tableName,
-      ZooKeeperProtos.Table.State.ENABLING);
+      TableState.State.ENABLING);
     boolean done = false;
     ServerManager serverManager = ((HMaster)this.server).getServerManager();
     // Get the regions of this table. We're done when all listed
@@ -236,7 +217,7 @@ public class EnableTableHandler extends EventHandler {
     if (done) {
       // Flip the table to enabled.
       this.assignmentManager.getTableStateManager().setTableState(
-        this.tableName, ZooKeeperProtos.Table.State.ENABLED);
+        this.tableName, TableState.State.ENABLED);
       LOG.info("Table '" + this.tableName
       + "' was successfully enabled. Status: done=" + done);
     } else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
index 591a1d883c9..d7c40bf2bad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
@@ -27,6 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -65,8 +67,9 @@ public class ModifyTableHandler extends TableEventHandler {
     // Check operation is possible on the table in its current state
     // Also checks whether the table exists
     if (masterServices.getAssignmentManager().getTableStateManager()
-        .isTableState(this.htd.getTableName(), ZooKeeperProtos.Table.State.ENABLED)
-        && this.htd.getRegionReplication() != getTableDescriptor().getRegionReplication()) {
+        .isTableState(this.htd.getTableName(), TableState.State.ENABLED)
+        && this.htd.getRegionReplication() != getTableDescriptor()
+        .getHTableDescriptor().getRegionReplication()) {
       throw new IOException("REGION_REPLICATION change is not supported for enabled tables");
     }
   }
@@ -79,11 +82,14 @@ public class ModifyTableHandler extends TableEventHandler {
       cpHost.preModifyTableHandler(this.tableName, this.htd);
     }
     // Update descriptor
-    HTableDescriptor oldHtd = getTableDescriptor();
-    this.masterServices.getTableDescriptors().add(this.htd);
-    deleteFamilyFromFS(hris, oldHtd.getFamiliesKeys());
-    removeReplicaColumnsIfNeeded(this.htd.getRegionReplication(), oldHtd.getRegionReplication(),
-        htd.getTableName());
+    HTableDescriptor oldDescriptor =
+        this.masterServices.getTableDescriptors().get(this.tableName);
+    this.masterServices.getTableDescriptors().add(htd);
+    deleteFamilyFromFS(hris, oldDescriptor.getFamiliesKeys());
+    removeReplicaColumnsIfNeeded(
+        this.htd.getRegionReplication(),
+        oldDescriptor.getRegionReplication(),
+        this.htd.getTableName());
     if (cpHost != null) {
       cpHost.postModifyTableHandler(this.tableName, this.htd);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index cae6142a409..6540491eb68 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -39,10 +40,8 @@ import org.apache.hadoop.hbase.master.DeadServer;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 
 /**
@@ -231,23 +230,23 @@ public class ServerShutdownHandler extends EventHandler {
                   continue;
                 }
                 LOG.info("Reassigning region with rs = " + rit);
-                regionStates.updateRegionState(hri, State.OFFLINE);
+                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
               } else if (regionStates.isRegionInState(
-                  hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
-                regionStates.updateRegionState(hri, State.OFFLINE);
+                  hri, RegionState.State.SPLITTING_NEW, RegionState.State.MERGING_NEW)) {
+                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
               }
               toAssignRegions.add(hri);
             } else if (rit != null) {
               if ((rit.isClosing() || rit.isFailedClose() || rit.isOffline())
                   && am.getTableStateManager().isTableState(hri.getTable(),
-                  ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
+                  TableState.State.DISABLED, TableState.State.DISABLING) ||
                   am.getReplicasToClose().contains(hri)) {
                 // If the table was partially disabled and the RS went down, we should clear the RIT
                 // and remove the node for the region.
                 // The rit that we use may be stale in case the table was in DISABLING state
                 // but though we did assign we will not be clearing the znode in CLOSING state.
                 // Doing this will have no harm. See HBASE-5927
-                regionStates.updateRegionState(hri, State.OFFLINE);
+                regionStates.updateRegionState(hri, RegionState.State.OFFLINE);
                 am.offlineDisabledRegion(hri);
               } else {
                 LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "
@@ -323,7 +322,7 @@ public class ServerShutdownHandler extends EventHandler {
     }
     // If table is not disabled but the region is offlined,
     boolean disabled = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLED);
+      TableState.State.DISABLED);
     if (disabled){
       LOG.info("The table " + hri.getTable()
           + " was disabled.  Hence not proceeding.");
@@ -336,7 +335,7 @@ public class ServerShutdownHandler extends EventHandler {
       return false;
     }
     boolean disabling = assignmentManager.getTableStateManager().isTableState(hri.getTable(),
-      ZooKeeperProtos.Table.State.DISABLING);
+      TableState.State.DISABLING);
     if (disabling) {
       LOG.info("The table " + hri.getTable()
           + " is disabled.  Hence not assigning region" + hri.getEncodedName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
index cd8fe9ee891..ee32a32de6c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -50,8 +51,8 @@ public class TableAddFamilyHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     super.prepareWithTableLock();
-    HTableDescriptor htd = getTableDescriptor();
-    if (htd.hasFamily(familyDesc.getName())) {
+    TableDescriptor htd = getTableDescriptor();
+    if (htd.getHTableDescriptor().hasFamily(familyDesc.getName())) {
       throw new InvalidFamilyOperationException("Family '" +
         familyDesc.getNameAsString() + "' already exists so cannot be added");
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
index 330b9d8ad58..b166be09edb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
@@ -50,7 +50,7 @@ public class TableDeleteFamilyHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     super.prepareWithTableLock();
-    HTableDescriptor htd = getTableDescriptor();
+    HTableDescriptor htd = getTableDescriptor().getHTableDescriptor();
     this.familyName = hasColumnFamily(htd, familyName);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index 4f1c39d1d2d..8993840a650 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -30,6 +30,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.CoordinatedStateException;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -40,12 +41,12 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.BulkReOpen;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 
 import com.google.common.collect.Lists;
@@ -130,7 +131,7 @@ public abstract class TableEventHandler extends EventHandler {
       handleTableOperation(hris);
       if (eventType.isOnlineSchemaChangeSupported() && this.masterServices.
           getAssignmentManager().getTableStateManager().isTableState(
-          tableName, ZooKeeperProtos.Table.State.ENABLED)) {
+          tableName, TableState.State.ENABLED)) {
         if (reOpenAllRegions(hris)) {
           LOG.info("Completed table operation " + eventType + " on table " +
               tableName);
@@ -230,10 +231,10 @@ public abstract class TableEventHandler extends EventHandler {
    * @throws FileNotFoundException
    * @throws IOException
    */
-  public HTableDescriptor getTableDescriptor()
+  public TableDescriptor getTableDescriptor()
   throws FileNotFoundException, IOException {
-    HTableDescriptor htd =
-      this.masterServices.getTableDescriptors().get(tableName);
+    TableDescriptor htd =
+      this.masterServices.getTableDescriptors().getDescriptor(tableName);
     if (htd == null) {
       throw new IOException("HTableDescriptor missing for " + tableName);
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
index d07d0aaae7e..75ec79cd9cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -49,7 +50,7 @@ public class TableModifyFamilyHandler extends TableEventHandler {
   @Override
   protected void prepareWithTableLock() throws IOException {
     super.prepareWithTableLock();
-    HTableDescriptor htd = getTableDescriptor();
+    HTableDescriptor htd = getTableDescriptor().getHTableDescriptor();
     hasColumnFamily(htd, familyDesc.getName());
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
index 086d1d5b384..6703a424019 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TruncateTableHandler.java
@@ -28,15 +28,17 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -93,54 +95,44 @@ public class TruncateTableHandler extends DeleteTableHandler {
 
     AssignmentManager assignmentManager = this.masterServices.getAssignmentManager();
 
-    // 1. Set table znode
-    CreateTableHandler.checkAndSetEnablingTable(assignmentManager, tableName);
-    try {
-      // 1. Create Table Descriptor
-      new FSTableDescriptors(server.getConfiguration())
-        .createTableDescriptorForTableDirectory(tempdir, this.hTableDescriptor, false);
-      Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName);
-      Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName);
+    // 1. Create Table Descriptor
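+    // As in CreateTableHandler: the descriptor is first persisted as ENABLING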
+    TableDescriptor underConstruction = new TableDescriptor(
+        this.hTableDescriptor, TableState.State.ENABLING);
+    Path tempTableDir = FSUtils.getTableDir(tempdir, this.tableName);
+    new FSTableDescriptors(server.getConfiguration())
+      .createTableDescriptorForTableDirectory(tempTableDir, underConstruction, false);
+    Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), this.tableName);
 
-      HRegionInfo[] newRegions;
-      if (this.preserveSplits) {
-        newRegions = regions.toArray(new HRegionInfo[regions.size()]);
-        LOG.info("Truncate will preserve " + newRegions.length + " regions");
-      } else {
-        newRegions = new HRegionInfo[1];
-        newRegions[0] = new HRegionInfo(this.tableName, null, null);
-        LOG.info("Truncate will not preserve the regions");
-      }
-
-      // 2. Create Regions
-      List<HRegionInfo> regionInfos = ModifyRegionUtils.createRegions(
-        masterServices.getConfiguration(), tempdir,
-        this.hTableDescriptor, newRegions, null);
-
-      // 3. Move Table temp directory to the hbase root location
-      if (!fs.rename(tempTableDir, tableDir)) {
-        throw new IOException("Unable to move table from temp=" + tempTableDir +
-          " to hbase root=" + tableDir);
-      }
-
-      // 4. Add regions to META
-      MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
-        regionInfos);
-
-      // 5. Trigger immediate assignment of the regions in round-robin fashion
-      ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
-
-      // 6. Set table enabled flag up in zk.
-      try {
-        assignmentManager.getTableStateManager().setTableState(tableName,
-          ZooKeeperProtos.Table.State.ENABLED);
-      } catch (CoordinatedStateException e) {
-        throw new IOException("Unable to ensure that " + tableName + " will be" +
-          " enabled because of a ZooKeeper issue", e);
-      }
-    } catch (IOException e) {
-      CreateTableHandler.removeEnablingTable(assignmentManager, tableName);
-      throw e;
+    HRegionInfo[] newRegions;
+    if (this.preserveSplits) {
+      newRegions = regions.toArray(new HRegionInfo[regions.size()]);
+      LOG.info("Truncate will preserve " + newRegions.length + " regions");
+    } else {
+      newRegions = new HRegionInfo[1];
+      newRegions[0] = new HRegionInfo(this.tableName, null, null);
+      LOG.info("Truncate will not preserve the regions");
     }
+
+    // 2. Create Regions
+    List<HRegionInfo> regionInfos = ModifyRegionUtils.createRegions(
+      masterServices.getConfiguration(), tempdir,
+      this.hTableDescriptor, newRegions, null);
+
+    // 3. Move Table temp directory to the hbase root location
+    if (!fs.rename(tempTableDir, tableDir)) {
+      throw new IOException("Unable to move table from temp=" + tempTableDir +
+        " to hbase root=" + tableDir);
+    }
+
+    // 4. Add regions to META
+    MetaTableAccessor.addRegionsToMeta(masterServices.getShortCircuitConnection(),
+      regionInfos);
+
+    // 5. Trigger immediate assignment of the regions in round-robin fashion
+    ModifyRegionUtils.assignRegions(assignmentManager, regionInfos);
+
+    // 6. Enable table
+    assignmentManager.getTableStateManager().setTableState(tableName,
+      TableState.State.ENABLED);
   }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index bfa50048340..e5847aa2152 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RequestContext;
@@ -566,14 +567,14 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     TableName snapshotTable = TableName.valueOf(snapshot.getTable());
     AssignmentManager assignmentMgr = master.getAssignmentManager();
     if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.ENABLED)) {
+        TableState.State.ENABLED)) {
       LOG.debug("Table enabled, starting distributed snapshot.");
       snapshotEnabledTable(snapshot);
       LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
     }
     // For disabled table, snapshot is created by the master
     else if (assignmentMgr.getTableStateManager().isTableState(snapshotTable,
-        ZooKeeperProtos.Table.State.DISABLED)) {
+        TableState.State.DISABLED)) {
       LOG.debug("Table is disabled, running snapshot entirely on master.");
       snapshotDisabledTable(snapshot);
       LOG.debug("Started snapshot: " + ClientSnapshotDescriptionUtils.toString(snapshot));
@@ -705,8 +706,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
 
     // Execute the restore/clone operation
     if (MetaTableAccessor.tableExists(master.getShortCircuitConnection(), tableName)) {
-      if (master.getAssignmentManager().getTableStateManager().isTableState(
-          TableName.valueOf(snapshot.getTable()), ZooKeeperProtos.Table.State.ENABLED)) {
+      if (master.getTableStateManager().isTableState(
+          TableName.valueOf(snapshot.getTable()), TableState.State.ENABLED)) {
         throw new UnsupportedOperationException("Table '" +
             TableName.valueOf(snapshot.getTable()) + "' must be disabled in order to " +
             "perform a restore operation" +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index b11d74ccb9a..1649c4ebf5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -39,12 +39,14 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@@ -374,7 +376,7 @@ public class NamespaceUpgrade implements Tool {
       HTableDescriptor newDesc = new HTableDescriptor(oldDesc);
       newDesc.setName(newTableName);
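+      // Migrated tables are written out as ENABLED; before this change their
+      // state lived in ZooKeeper rather than in the tableinfo file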
       new FSTableDescriptors(this.conf).createTableDescriptorForTableDirectory(
-        newTablePath, newDesc, true);
+        newTablePath, new TableDescriptor(newDesc, TableState.State.ENABLED), true);
     }
 
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 4417bd9b958..7405272d666 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -112,13 +113,14 @@ public class CompactionTool extends Configured implements Tool {
       if (isFamilyDir(fs, path)) {
         Path regionDir = path.getParent();
         Path tableDir = regionDir.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
         HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-        compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major);
+        compactStoreFiles(tableDir, htd.getHTableDescriptor(), hri,
+            path.getName(), compactOnce, major);
       } else if (isRegionDir(fs, path)) {
         Path tableDir = path.getParent();
-        HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-        compactRegion(tableDir, htd, path, compactOnce, major);
+        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+        compactRegion(tableDir, htd.getHTableDescriptor(), path, compactOnce, major);
       } else if (isTableDir(fs, path)) {
         compactTable(path, compactOnce, major);
       } else {
@@ -129,9 +131,9 @@ public class CompactionTool extends Configured implements Tool {
 
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
-      HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
+      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
       for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-        compactRegion(tableDir, htd, regionDir, compactOnce, major);
+        compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
index 1766d08e355..cf5b12629c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
@@ -46,6 +46,9 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.protobuf.ServiceException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -56,7 +59,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -66,7 +68,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
@@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
 import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -92,7 +94,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
@@ -112,10 +113,6 @@ import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RemoteException;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.protobuf.ServiceException;
-
 /**
  * This class is responsible for splitting up a bunch of regionserver commit log
  * files that are no longer being written to, into new files, one per region for
@@ -286,12 +283,13 @@ public class HLogSplitter {
         return true;
       }
       if(csm != null) {
-        try {
-          TableStateManager tsm = csm.getTableStateManager();
-          disablingOrDisabledTables = tsm.getTablesInStates(
-            ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING);
-        } catch (CoordinatedStateException e) {
-          throw new IOException("Can't get disabling/disabled tables", e);
+        HConnection scc = csm.getServer().getShortCircuitConnection();
+        TableName[] tables = scc.listTableNames();
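+        // Ask the master for table states over the short-circuit connection
+        // instead of reading the old ZooKeeper table-state nodes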
+        for (TableName table : tables) {
+          if (scc.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disablingOrDisabledTables.add(table);
+          }
         }
       }
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 00384236ec1..7a03427bc5e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -89,6 +89,7 @@ public class WALCellCodec implements Codec {
    * Fully prepares the codec for use.
    * @param conf {@link Configuration} to read for the user-specified codec. If none is specified,
    *          uses a {@link WALCellCodec}.
+   * @param cellCodecClsName name of the cell codec class to instantiate
    * @param compression compression the codec should use
    * @return a {@link WALCellCodec} ready for use.
    * @throws UnsupportedOperationException if the codec cannot be instantiated
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 47c6ebf3cbb..d6d4f717a73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -38,6 +38,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
@@ -259,7 +261,8 @@ public class SnapshotManifest {
   private void load() throws IOException {
     switch (getSnapshotFormat(desc)) {
       case SnapshotManifestV1.DESCRIPTOR_VERSION: {
-        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
+        this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir)
+            .getHTableDescriptor();
         ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
         try {
           this.regionManifests =
@@ -353,7 +356,8 @@ public class SnapshotManifest {
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
       new FSTableDescriptors(fs, rootDir)
-        .createTableDescriptorForTableDirectory(workingDir, htd, false);
+        .createTableDescriptorForTableDirectory(workingDir, new TableDescriptor(
+            htd, TableState.State.ENABLED), false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
       convertToV2SingleManifest();
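
Both call sites above wrap the plain HTableDescriptor into the new state-carrying TableDescriptor before it is persisted. A minimal stand-alone sketch of that wrapping (the table name "example" is a placeholder):

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;

public class DescriptorWrapSketch {
  public static void main(String[] args) {
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    // Snapshots persist the descriptor with an explicit ENABLED state.
    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
    System.out.println(td.getHTableDescriptor().getTableName()
        + " state=" + td.getTableState());
  }
}
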
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 09749d0cc96..bc8fc7faacd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import javax.annotation.Nullable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Comparator;
@@ -38,7 +39,9 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -92,11 +95,11 @@ public class FSTableDescriptors implements TableDescriptors {
    * Data structure to hold modification time and table descriptor.
    */
   private static class TableDescriptorAndModtime {
-    private final HTableDescriptor htd;
+    private final TableDescriptor td;
     private final long modtime;
 
-    TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) {
-      this.htd = htd;
+    TableDescriptorAndModtime(final long modtime, final TableDescriptor td) {
+      this.td = td;
       this.modtime = modtime;
     }
 
@@ -104,8 +107,16 @@ public class FSTableDescriptors implements TableDescriptors {
       return this.modtime;
     }
 
-    HTableDescriptor getTableDescriptor() {
-      return this.htd;
+    TableDescriptor getTableDescriptor() {
+      return this.td;
+    }
+
+    HTableDescriptor getHTableDescriptor() {
+      return this.td.getHTableDescriptor();
+    }
+
+    TableState.State getTableState() {
+      return this.td.getTableState();
     }
   }
 
@@ -141,12 +152,13 @@ public class FSTableDescriptors implements TableDescriptors {
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
-  public HTableDescriptor get(final TableName tablename)
+  @Nullable
+  public TableDescriptor getDescriptor(final TableName tablename)
   throws IOException {
     invocations++;
     if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) {
       cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
+      return new TableDescriptor(HTableDescriptor.META_TABLEDESC, TableState.State.ENABLED);
     }
     // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -182,31 +194,62 @@ public class FSTableDescriptors implements TableDescriptors {
     return tdmt == null ? null : tdmt.getTableDescriptor();
   }
 
+  /**
+   * Get the current table descriptor for the given table, or null if none exists.
+   *
+   * Uses a local cache of the descriptor but still checks the filesystem on each call
+   * to see if a newer file has been created since the cached one was read.
+   */
+  @Override
+  public HTableDescriptor get(TableName tableName) throws IOException {
+    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tableName)) {
+      cachehits++;
+      return HTableDescriptor.META_TABLEDESC;
+    }
+    TableDescriptor descriptor = getDescriptor(tableName);
+    return descriptor == null ? null : descriptor.getHTableDescriptor();
+  }
+
   /**
    * Returns a map from table name to table descriptor for all tables.
    */
   @Override
-  public Map<String, HTableDescriptor> getAll()
+  public Map<String, TableDescriptor> getAllDescriptors()
   throws IOException {
-    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> tds = new TreeMap<String, TableDescriptor>();
     List<Path> tableDirs = FSUtils.getTableDirs(fs, rootdir);
     for (Path d: tableDirs) {
-      HTableDescriptor htd = null;
+      TableDescriptor htd = null;
       try {
-        htd = get(FSUtils.getTableName(d));
+        htd = getDescriptor(FSUtils.getTableName(d));
       } catch (FileNotFoundException fnfe) {
         // inability of retrieving one HTD shouldn't stop getting the remaining
         LOG.warn("Trouble retrieving htd", fnfe);
       }
       if (htd == null) continue;
-      htds.put(htd.getTableName().getNameAsString(), htd);
+      tds.put(htd.getHTableDescriptor().getTableName().getNameAsString(), htd);
+    }
+    return tds;
+  }
+
+  /**
+   * Returns a map from table name to table descriptor for all tables.
+   */
+  @Override
+  public Map<String, HTableDescriptor> getAll() throws IOException {
+    Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
+    Map<String, TableDescriptor> allDescriptors = getAllDescriptors();
+    for (Map.Entry<String, TableDescriptor> entry : allDescriptors.entrySet()) {
+      htds.put(entry.getKey(), entry.getValue().getHTableDescriptor());
     }
     return htds;
   }
 
-  /* (non-Javadoc)
-   * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path)
-   */
+  /**
+   * Find descriptors by namespace.
+   * @see #get(org.apache.hadoop.hbase.TableName)
+   */
   @Override
   public Map<String, HTableDescriptor> getByNamespace(String name)
   throws IOException {
@@ -227,6 +270,27 @@ public class FSTableDescriptors implements TableDescriptors {
     return htds;
   }
 
+  /**
+   * Adds (or updates) the table descriptor to the FileSystem
+   * and updates the local cache with it.
+   */
+  @Override
+  public void add(TableDescriptor htd) throws IOException {
+    if (fsreadonly) {
+      throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
+    }
+    TableName tableName = htd.getHTableDescriptor().getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
+      throw new NotImplementedException();
+    }
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
+      throw new NotImplementedException(
+        "Cannot add a table descriptor for a reserved subdirectory name: "
+            + htd.getHTableDescriptor().getNameAsString());
+    }
+    updateTableDescriptor(htd);
+  }
+
   /**
    * Adds (or updates) the table descriptor to the FileSystem
    * and updates the local cache with it.
@@ -236,16 +300,21 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
+    TableName tableName = htd.getTableName();
+    if (TableName.META_TABLE_NAME.equals(tableName)) {
       throw new NotImplementedException();
     }
-    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
+    if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
-        "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
+          "Cannot add a table descriptor for a reserved subdirectory name: "
+              + htd.getNameAsString());
     }
-    updateTableDescriptor(htd);
-    long modtime = getTableInfoModtime(htd.getTableName());
-    this.cache.put(htd.getTableName(), new TableDescriptorAndModtime(modtime, htd));
+    TableDescriptor descriptor = getDescriptor(tableName);
+    if (descriptor == null) {
+      descriptor = new TableDescriptor(htd);
+    } else {
+      descriptor.setHTableDescriptor(htd);
+    }
+    updateTableDescriptor(descriptor);
   }
 
   /**
@@ -266,7 +335,7 @@ public class FSTableDescriptors implements TableDescriptors {
       }
     }
     TableDescriptorAndModtime tdm = this.cache.remove(tablename);
-    return tdm == null ? null : tdm.getTableDescriptor();
+    return tdm == null ? null : tdm.getHTableDescriptor();
   }
 
   /**
@@ -463,7 +532,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * if it exists, bypassing the local cache.
    * Returns null if it's not found.
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs,
       Path hbaseRootDir, TableName tableName) throws IOException {
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
     return getTableDescriptorFromFs(fs, tableDir);
@@ -474,7 +543,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * directly from the file system if it exists.
    * @throws TableInfoMissingException if there is no descriptor
    */
-  public static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
+  public static TableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir)
   throws IOException {
     FileStatus status = getTableInfoPath(fs, tableDir, false);
     if (status == null) {
@@ -509,11 +578,11 @@ public class FSTableDescriptors implements TableDescriptors {
     if (status == null) {
       return null;
     }
-    HTableDescriptor htd = readTableDescriptor(fs, status, !fsreadonly);
-    return new TableDescriptorAndModtime(status.getModificationTime(), htd);
+    TableDescriptor td = readTableDescriptor(fs, status, !fsreadonly);
+    return new TableDescriptorAndModtime(status.getModificationTime(), td);
   }
 
-  private static HTableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
+  private static TableDescriptor readTableDescriptor(FileSystem fs, FileStatus status,
       boolean rewritePb) throws IOException {
     int len = Ints.checkedCast(status.getLen());
     byte [] content = new byte[len];
@@ -523,9 +592,9 @@ public class FSTableDescriptors implements TableDescriptors {
     } finally {
       fsDataInputStream.close();
     }
-    HTableDescriptor htd = null;
+    TableDescriptor td = null;
     try {
-      htd = HTableDescriptor.parseFrom(content);
+      td = TableDescriptor.parseFrom(content);
     } catch (DeserializationException e) {
       throw new IOException("content=" + Bytes.toShort(content), e);
     }
@@ -533,25 +602,28 @@ public class FSTableDescriptors implements TableDescriptors {
       // Convert the file over to be pb before leaving here.
       Path tableInfoDir = status.getPath().getParent();
       Path tableDir = tableInfoDir.getParent();
-      writeTableDescriptor(fs, htd, tableDir, status);
+      writeTableDescriptor(fs, td, tableDir, status);
     }
-    return htd;
+    return td;
   }
- 
+
   /**
    * Update table descriptor on the file system
    * @throws IOException Thrown if failed update.
    * @throws NotImplementedException if in read only mode
    */
-  @VisibleForTesting Path updateTableDescriptor(HTableDescriptor htd)
+  @VisibleForTesting Path updateTableDescriptor(TableDescriptor td)
   throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot update a table descriptor - in read only mode");
     }
-    Path tableDir = getTableDir(htd.getTableName());
-    Path p = writeTableDescriptor(fs, htd, tableDir, getTableInfoPath(tableDir));
+    TableName tableName = td.getHTableDescriptor().getTableName();
+    Path tableDir = getTableDir(tableName);
+    Path p = writeTableDescriptor(fs, td, tableDir, getTableInfoPath(tableDir));
     if (p == null) throw new IOException("Failed update");
     LOG.info("Updated tableinfo=" + p);
+    long modtime = getTableInfoModtime(tableName);
+    this.cache.put(tableName, new TableDescriptorAndModtime(modtime, td));
     return p;
   }
 
@@ -601,7 +673,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @return Descriptor file or null if we failed write.
    */
   private static Path writeTableDescriptor(final FileSystem fs, 
-    final HTableDescriptor htd, final Path tableDir,
+    final TableDescriptor htd, final Path tableDir,
     final FileStatus currentDescriptorFile)
   throws IOException {  
     // Get temporary dir into which we'll first write a file to avoid half-written file phenomenon.
@@ -632,7 +704,7 @@ public class FSTableDescriptors implements TableDescriptors {
       }
       tableInfoDirPath = new Path(tableInfoDir, filename);
       try {
-        writeHTD(fs, tempPath, htd);
+        writeTD(fs, tempPath, htd);
         fs.mkdirs(tableInfoDirPath.getParent());
         if (!fs.rename(tempPath, tableInfoDirPath)) {
           throw new IOException("Failed rename of " + tempPath + " to " + tableInfoDirPath);
@@ -656,7 +728,7 @@ public class FSTableDescriptors implements TableDescriptors {
     return tableInfoDirPath;
   }
   
-  private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
+  private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd)
   throws IOException {
     FSDataOutputStream out = fs.create(p, false);
     try {
@@ -673,10 +745,19 @@ public class FSTableDescriptors implements TableDescriptors {
    * Used by tests.
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+  public boolean createTableDescriptor(TableDescriptor htd) throws IOException {
     return createTableDescriptor(htd, false);
   }
 
+  /**
+   * Create new HTableDescriptor in HDFS. Happens when we are creating table.
+   * Used by tests.
+   * @return True if we successfully created file.
+   */
+  public boolean createTableDescriptor(HTableDescriptor htd) throws IOException {
+    return createTableDescriptor(new TableDescriptor(htd), false);
+  }
+
   /**
    * Create new HTableDescriptor in HDFS. Happens when we are creating table. If
    * forceCreation is true then even if previous table descriptor is present it
@@ -684,12 +765,21 @@ public class FSTableDescriptors implements TableDescriptors {
    * 
    * @return True if we successfully created file.
    */
-  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
+  public boolean createTableDescriptor(TableDescriptor htd, boolean forceCreation)
   throws IOException {
-    Path tableDir = getTableDir(htd.getTableName());
+    Path tableDir = getTableDir(htd.getHTableDescriptor().getTableName());
     return createTableDescriptorForTableDirectory(tableDir, htd, forceCreation);
   }
-  
+
+  /**
+   * Create a table descriptor for the given HTableDescriptor. The default
+   * TableDescriptor state will be used (typically ENABLED).
+   */
+  public boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation)
+      throws IOException {
+    return createTableDescriptor(new TableDescriptor(htd), forceCreation);
+  }
+
   /**
    * Create a new HTableDescriptor in HDFS in the specified table directory. Happens when we create
    * a new table or snapshot a table.
@@ -702,7 +792,7 @@ public class FSTableDescriptors implements TableDescriptors {
    * @throws IOException if a filesystem error occurs
    */
   public boolean createTableDescriptorForTableDirectory(Path tableDir,
-      HTableDescriptor htd, boolean forceCreation) throws IOException {
+      TableDescriptor htd, boolean forceCreation) throws IOException {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot create a table descriptor - in read only mode");
     }
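
FSTableDescriptors now exposes both views: getDescriptor returns the state-carrying TableDescriptor (nullable), while the legacy get keeps returning the bare HTableDescriptor. A usage sketch under assumed inputs (the root dir "/hbase" and table name "example" are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FsDescriptorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path("/hbase");  // placeholder root directory
    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);
    // New state-carrying accessor; may return null if the table has no descriptor.
    TableDescriptor td = fstd.getDescriptor(TableName.valueOf("example"));
    if (td != null) {
      System.out.println("state=" + td.getTableState());
    }
    // Legacy accessor, kept for callers that only need the schema.
    HTableDescriptor htd = fstd.get(TableName.valueOf("example"));
    System.out.println(htd);
  }
}
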
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 017153a59c7..d5cb4396742 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.io.PrintWriter;
 import java.io.StringWriter;
 import java.net.URI;
@@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -89,6 +89,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -107,7 +108,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.io.IOUtils;
@@ -953,9 +953,9 @@ public class HBaseFsck extends Configured {
         modTInfo = new TableInfo(tableName);
         tablesInfo.put(tableName, modTInfo);
         try {
-          HTableDescriptor htd =
+          TableDescriptor htd =
               FSTableDescriptors.getTableDescriptorFromFs(fs, hbaseRoot, tableName);
-          modTInfo.htds.add(htd);
+          modTInfo.htds.add(htd.getHTableDescriptor());
         } catch (IOException ioe) {
           if (!orphanTableDirs.containsKey(tableName)) {
             LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
@@ -1009,7 +1009,7 @@ public class HBaseFsck extends Configured {
     for (String columnfamimly : columns) {
       htd.addFamily(new HColumnDescriptor(columnfamimly));
     }
-    fstd.createTableDescriptor(htd, true);
+    fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
     return true;
   }
 
@@ -1057,7 +1057,7 @@ public class HBaseFsck extends Configured {
           if (tableName.equals(htds[j].getTableName())) {
             HTableDescriptor htd = htds[j];
             LOG.info("fixing orphan table: " + tableName + " from cache");
-            fstd.createTableDescriptor(htd, true);
+            fstd.createTableDescriptor(new TableDescriptor(htd, TableState.State.ENABLED), true);
             j++;
             iter.remove();
           }
@@ -1382,22 +1382,16 @@ public class HBaseFsck extends Configured {
    * @throws IOException
    */
   private void loadDisabledTables()
-  throws ZooKeeperConnectionException, IOException {
+  throws IOException {
     HConnectionManager.execute(new HConnectable<Void>(getConf()) {
       @Override
       public Void connect(HConnection connection) throws IOException {
-        ZooKeeperWatcher zkw = createZooKeeperWatcher();
-        try {
-          for (TableName tableName :
-              ZKTableStateClientSideReader.getDisabledOrDisablingTables(zkw)) {
-            disabledTables.add(tableName);
+        TableName[] tables = connection.listTableNames();
+        for (TableName table : tables) {
+          if (connection.getTableState(table)
+              .inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
+            disabledTables.add(table);
           }
-        } catch (KeeperException ke) {
-          throw new IOException(ke);
-        } catch (InterruptedException e) {
-          throw new InterruptedIOException();
-        } finally {
-          zkw.close();
         }
         return null;
       }
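
loadDisabledTables now runs entirely over the client connection instead of a ZooKeeperWatcher. The same HConnectable pattern, extracted as a stand-alone sketch (printing the states instead of populating hbck's internal set):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HConnectable;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.TableState;

public class StateScanSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HConnectionManager.execute(new HConnectable<Void>(conf) {
      @Override
      public Void connect(HConnection connection) throws IOException {
        for (TableName table : connection.listTableNames()) {
          TableState state = connection.getTableState(table);
          if (state.inStates(TableState.State.DISABLED, TableState.State.DISABLING)) {
            System.out.println(table + " is " + state);
          }
        }
        return null;
      }
    });
  }
}
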
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
index 98eb7e22d79..e910be5f1a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
@@ -156,7 +156,8 @@ class HMerge {
 
       this.rootDir = FSUtils.getRootDir(conf);
       Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
+      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir)
+          .getHTableDescriptor();
       String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
 
       this.hlog = HLogFactory.createHLog(fs, tabledir, logname, conf);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index d50005ba4b0..25aeeedff73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
@@ -153,9 +154,9 @@ public class Merge extends Configured implements Tool {
     if (info2 == null) {
       throw new NullPointerException("info2 is null using key " + meta);
     }
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
+    TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
       this.rootdir, this.tableName);
-    HRegion merged = merge(htd, meta, info1, info2);
+    HRegion merged = merge(htd.getHTableDescriptor(), meta, info1, info2);
 
     LOG.info("Adding " + merged.getRegionInfo() + " to " +
         meta.getRegionInfo());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
index f773b06f503..f4c0c776149 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ZKDataMigrator.java
@@ -18,8 +18,11 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,8 +30,12 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
@@ -151,8 +158,9 @@ public class ZKDataMigrator extends Configured implements Tool {
       }
       byte[] data = ZKUtil.getData(zkw, znode);
       if (ProtobufUtil.isPBMagicPrefix(data)) continue;
-      ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder();
-      builder.setState(ZooKeeperProtos.Table.State.valueOf(Bytes.toString(data)));
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      builder.setState(ZooKeeperProtos.DeprecatedTableState.State.valueOf(Bytes.toString(data)));
       data = ProtobufUtil.prependPBMagic(builder.build().toByteArray());
       ZKUtil.setData(zkw, znode, data);
     }
@@ -247,6 +255,77 @@ public class ZKDataMigrator extends Configured implements Tool {
     }
   }
 
+  /**
+   * Method for table state migration.
+   * Reads the table states from ZK so the master can apply them to the new
+   * (table descriptor based) store and then delete the ZK state nodes.
+   * Used by the master to complete the migration from ZK based table states
+   * to table descriptor based ones.
+   */
+  @Deprecated
+  public static Map<TableName, TableState.State> queryForTableStates(ZooKeeperWatcher zkw)
+      throws KeeperException, InterruptedException {
+    Map<TableName, TableState.State> rv = new HashMap<>();
+    List<String> children = ZKUtil.listChildrenNoWatch(zkw, zkw.tableZNode);
+    if (children == null) {
+      return rv;
+    }
+    for (String child: children) {
+      TableName tableName = TableName.valueOf(child);
+      ZooKeeperProtos.DeprecatedTableState.State state = getTableState(zkw, tableName);
+      TableState.State newState = TableState.State.ENABLED;
+      if (state != null) {
+        switch (state) {
+        case ENABLED:
+          newState = TableState.State.ENABLED;
+          break;
+        case DISABLED:
+          newState = TableState.State.DISABLED;
+          break;
+        case DISABLING:
+          newState = TableState.State.DISABLING;
+          break;
+        case ENABLING:
+          newState = TableState.State.ENABLING;
+          break;
+        default:
+        }
+      }
+      rv.put(tableName, newState);
+    }
+    return rv;
+  }
+
+  /**
+   * Gets table state from ZK.
+   * @param zkw ZooKeeperWatcher instance to use
+   * @param tableName table we're checking
+   * @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
+   * @throws KeeperException
+   */
+  @Deprecated
+  private static ZooKeeperProtos.DeprecatedTableState.State getTableState(
+      final ZooKeeperWatcher zkw, final TableName tableName)
+      throws KeeperException, InterruptedException {
+    String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString());
+    byte [] data = ZKUtil.getData(zkw, znode);
+    if (data == null || data.length <= 0) return null;
+    try {
+      ProtobufUtil.expectPBMagicPrefix(data);
+      ZooKeeperProtos.DeprecatedTableState.Builder builder =
+          ZooKeeperProtos.DeprecatedTableState.newBuilder();
+      int magicLen = ProtobufUtil.lengthOfPBMagic();
+      ZooKeeperProtos.DeprecatedTableState t = builder.mergeFrom(data,
+          magicLen, data.length - magicLen).build();
+      return t.getState();
+    } catch (InvalidProtocolBufferException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    } catch (DeserializationException e) {
+      throw ZKUtil.convert(e);
+    }
+  }
+
   public static void main(String args[]) throws Exception {
     System.exit(ToolRunner.run(HBaseConfiguration.create(), new ZKDataMigrator(), args));
   }
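
For migration tooling built on top of this class, queryForTableStates is the one-shot read of the legacy znodes. A sketch of a caller (the ZooKeeperWatcher is assumed to be constructed and closed elsewhere; the helper name is ours):

import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.util.ZKDataMigrator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

public class MigrationSketch {
  static void printLegacyStates(ZooKeeperWatcher zkw) throws Exception {
    // Reads the deprecated ZK state znodes once; callers would then persist
    // these states into the descriptor-based store and delete the znodes.
    Map<TableName, TableState.State> states = ZKDataMigrator.queryForTableStates(zkw);
    for (Map.Entry<TableName, TableState.State> e : states.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}
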
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
deleted file mode 100644
index 1aff12f4349..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKTableStateManager.java
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-import org.apache.zookeeper.KeeperException;
-
-import java.io.InterruptedIOException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-/**
- * Implementation of TableStateManager which reads, caches and sets state
- * up in ZooKeeper.  If multiple read/write clients, will make for confusion.
- * Code running on client side without consensus context should use
- * {@link ZKTableStateClientSideReader} instead.
- *
- * 

To save on trips to the zookeeper ensemble, internally we cache table - * state. - */ -@InterfaceAudience.Private -public class ZKTableStateManager implements TableStateManager { - // A znode will exist under the table directory if it is in any of the - // following states: {@link TableState#ENABLING} , {@link TableState#DISABLING}, - // or {@link TableState#DISABLED}. If {@link TableState#ENABLED}, there will - // be no entry for a table in zk. Thats how it currently works. - - private static final Log LOG = LogFactory.getLog(ZKTableStateManager.class); - private final ZooKeeperWatcher watcher; - - /** - * Cache of what we found in zookeeper so we don't have to go to zk ensemble - * for every query. Synchronize access rather than use concurrent Map because - * synchronization needs to span query of zk. - */ - private final Map cache = - new HashMap(); - - public ZKTableStateManager(final ZooKeeperWatcher zkw) throws KeeperException, - InterruptedException { - super(); - this.watcher = zkw; - populateTableStates(); - } - - /** - * Gets a list of all the tables set as disabled in zookeeper. - * @throws KeeperException, InterruptedException - */ - private void populateTableStates() throws KeeperException, InterruptedException { - synchronized (this.cache) { - List children = ZKUtil.listChildrenNoWatch(this.watcher, this.watcher.tableZNode); - if (children == null) return; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state = getTableState(this.watcher, tableName); - if (state != null) this.cache.put(tableName, state); - } - } - } - - /** - * Sets table state in ZK. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setTableState(TableName tableName, ZooKeeperProtos.Table.State state) - throws CoordinatedStateException { - synchronized (this.cache) { - LOG.warn("Moving table " + tableName + " state from " + this.cache.get(tableName) - + " to " + state); - try { - setTableStateInZK(tableName, state); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... states) - throws CoordinatedStateException { - synchronized (this.cache) { - // Transition ENABLED->DISABLING has to be performed with a hack, because - // we treat empty state as enabled in this case because 0.92- clusters. - if ( - (newState == ZooKeeperProtos.Table.State.DISABLING) && - this.cache.get(tableName) != null && !isTableState(tableName, states) || - (newState != ZooKeeperProtos.Table.State.DISABLING && - !isTableState(tableName, states) )) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - /** - * Checks and sets table state in ZK. Sets no watches. - * {@inheritDoc} - */ - @Override - public boolean setTableStateIfNotInStates(TableName tableName, - ZooKeeperProtos.Table.State newState, - ZooKeeperProtos.Table.State... 
states) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - return false; - } - try { - setTableStateInZK(tableName, newState); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - return true; - } - } - - private void setTableStateInZK(final TableName tableName, - final ZooKeeperProtos.Table.State state) - throws KeeperException { - String znode = ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString()); - if (ZKUtil.checkExists(this.watcher, znode) == -1) { - ZKUtil.createAndFailSilent(this.watcher, znode); - } - synchronized (this.cache) { - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - builder.setState(state); - byte [] data = ProtobufUtil.prependPBMagic(builder.build().toByteArray()); - ZKUtil.setData(this.watcher, znode, data); - this.cache.put(tableName, state); - } - } - - /** - * Checks if table is marked in specified state in ZK. - * - * {@inheritDoc} - */ - @Override - public boolean isTableState(final TableName tableName, - final ZooKeeperProtos.Table.State... states) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State currentState = this.cache.get(tableName); - return isTableInState(Arrays.asList(states), currentState); - } - } - - /** - * Deletes the table in zookeeper. Fails silently if the - * table is not currently disabled in zookeeper. Sets no watches. - * - * {@inheritDoc} - */ - @Override - public void setDeletedTable(final TableName tableName) - throws CoordinatedStateException { - synchronized (this.cache) { - if (this.cache.remove(tableName) == null) { - LOG.warn("Moving table " + tableName + " state to deleted but was " + - "already deleted"); - } - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - - /** - * check if table is present. - * - * @param tableName table we're working on - * @return true if the table is present - */ - @Override - public boolean isTablePresent(final TableName tableName) { - synchronized (this.cache) { - ZooKeeperProtos.Table.State state = this.cache.get(tableName); - return !(state == null); - } - } - - /** - * Gets a list of all the tables set as disabling in zookeeper. - * @return Set of disabling tables, empty Set if none - * @throws CoordinatedStateException if error happened in underlying coordination engine - */ - @Override - public Set getTablesInStates(ZooKeeperProtos.Table.State... states) - throws InterruptedIOException, CoordinatedStateException { - try { - return getAllTables(states); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void checkAndRemoveTableState(TableName tableName, ZooKeeperProtos.Table.State states, - boolean deletePermanentState) - throws CoordinatedStateException { - synchronized (this.cache) { - if (isTableState(tableName, states)) { - this.cache.remove(tableName); - if (deletePermanentState) { - try { - ZKUtil.deleteNodeFailSilent(this.watcher, - ZKUtil.joinZNode(this.watcher.tableZNode, tableName.getNameAsString())); - } catch (KeeperException e) { - throw new CoordinatedStateException(e); - } - } - } - } - } - - /** - * Gets a list of all the tables of specified states in zookeeper. 
- * @return Set of tables of specified states, empty Set if none - * @throws KeeperException - */ - Set getAllTables(final ZooKeeperProtos.Table.State... states) - throws KeeperException, InterruptedIOException { - - Set allTables = new HashSet(); - List children = - ZKUtil.listChildrenNoWatch(watcher, watcher.tableZNode); - if(children == null) return allTables; - for (String child: children) { - TableName tableName = TableName.valueOf(child); - ZooKeeperProtos.Table.State state; - try { - state = getTableState(watcher, tableName); - } catch (InterruptedException e) { - throw new InterruptedIOException(); - } - for (ZooKeeperProtos.Table.State expectedState: states) { - if (state == expectedState) { - allTables.add(tableName); - break; - } - } - } - return allTables; - } - - /** - * Gets table state from ZK. - * @param zkw ZooKeeperWatcher instance to use - * @param tableName table we're checking - * @return Null or {@link ZooKeeperProtos.Table.State} found in znode. - * @throws KeeperException - */ - private ZooKeeperProtos.Table.State getTableState(final ZooKeeperWatcher zkw, - final TableName tableName) - throws KeeperException, InterruptedException { - String znode = ZKUtil.joinZNode(zkw.tableZNode, tableName.getNameAsString()); - byte [] data = ZKUtil.getData(zkw, znode); - if (data == null || data.length <= 0) return null; - try { - ProtobufUtil.expectPBMagicPrefix(data); - ZooKeeperProtos.Table.Builder builder = ZooKeeperProtos.Table.newBuilder(); - int magicLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.Table t = builder.mergeFrom(data, magicLen, data.length - magicLen).build(); - return t.getState(); - } catch (InvalidProtocolBufferException e) { - KeeperException ke = new KeeperException.DataInconsistencyException(); - ke.initCause(e); - throw ke; - } catch (DeserializationException e) { - throw ZKUtil.convert(e); - } - } - - /** - * @return true if current state isn't null and is contained - * in the list of expected states. - */ - private boolean isTableInState(final List expectedStates, - final ZooKeeperProtos.Table.State currentState) { - return currentState != null && expectedStates.contains(currentState); - } -} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 45bc5247264..dd9384d64bf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -2817,6 +2817,48 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } } + /** + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' + * Will timeout after default period (30 seconds) + * @param table Table to wait on. + * @throws InterruptedException + * @throws IOException + */ + public void waitTableDisabled(byte[] table) + throws InterruptedException, IOException { + waitTableDisabled(getHBaseAdmin(), table, 30000); + } + + public void waitTableDisabled(Admin admin, byte[] table) + throws InterruptedException, IOException { + waitTableDisabled(admin, table, 30000); + } + + /** + * Waits for a table to be 'disabled'. Disabled means that table is set as 'disabled' + * @see #waitTableAvailable(byte[]) + * @param table Table to wait on. + * @param timeoutMillis Time to wait on it being marked disabled. 
+ * @throws InterruptedException + * @throws IOException + */ + public void waitTableDisabled(byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + waitTableDisabled(getHBaseAdmin(), table, timeoutMillis); + } + + public void waitTableDisabled(Admin admin, byte[] table, long timeoutMillis) + throws InterruptedException, IOException { + TableName tableName = TableName.valueOf(table); + long startWait = System.currentTimeMillis(); + while (!admin.isTableDisabled(tableName)) { + assertTrue("Timed out waiting for table to become disabled " + + Bytes.toStringBinary(table), + System.currentTimeMillis() - startWait < timeoutMillis); + Thread.sleep(200); + } + } + /** * Make sure that at least the specified number of region servers * are running diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java index 26a8d2c76b6..4fa945a7c49 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptorDefaultVersions.java @@ -148,8 +148,8 @@ public class TestHColumnDescriptorDefaultVersions { // Verify descriptor from HDFS MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); - hcds = htd.getColumnFamilies(); + TableDescriptor td = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir); + hcds = td.getHTableDescriptor().getColumnFamilies(); verifyHColumnDescriptor(expected, hcds, tableName, families); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java new file mode 100644 index 00000000000..19c1136727f --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestTableDescriptor.java @@ -0,0 +1,57 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +/** + * Test setting values in the descriptor + */ +@Category(SmallTests.class) +public class TestTableDescriptor { + final static Log LOG = LogFactory.getLog(TestTableDescriptor.class); + + @Test + public void testPb() throws DeserializationException, IOException { + HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC); + final int v = 123; + htd.setMaxFileSize(v); + htd.setDurability(Durability.ASYNC_WAL); + htd.setReadOnly(true); + htd.setRegionReplication(2); + TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED); + byte[] bytes = td.toByteArray(); + TableDescriptor deserializedTd = TableDescriptor.parseFrom(bytes); + assertEquals(td, deserializedTd); + assertEquals(td.getHTableDescriptor(), deserializedTd.getHTableDescriptor()); + assertEquals(td.getTableState(), deserializedTd.getTableState()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java index b6b446da2f6..6fe6ede63da 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.wal.HLogUtilsForTests; @@ -66,7 +65,6 @@ import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.zookeeper.ZKTableStateClientSideReader; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.junit.After; import org.junit.AfterClass; @@ -258,7 +256,7 @@ public class TestAdmin { this.admin.disableTable(ht.getName()); assertTrue("Table must be disabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.DISABLED)); + ht.getName(), TableState.State.DISABLED)); // Test that table is disabled get = new Get(row); @@ -273,7 +271,7 @@ public class TestAdmin { this.admin.enableTable(table); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() .getMaster().getAssignmentManager().getTableStateManager().isTableState( - ht.getName(), ZooKeeperProtos.Table.State.ENABLED)); + ht.getName(), TableState.State.ENABLED)); // Test that table is enabled try { @@ -346,7 +344,7 @@ public class TestAdmin { assertEquals(numTables + 1, tables.length); assertTrue("Table must be enabled.", TEST_UTIL.getHBaseCluster() 
.getMaster().getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("testCreateTable"), ZooKeeperProtos.Table.State.ENABLED)); + TableName.valueOf("testCreateTable"), TableState.State.ENABLED)); } @Test (timeout=300000) @@ -1128,8 +1126,7 @@ public class TestAdmin { ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL); TableName tableName = TableName.valueOf("testMasterAdmin"); TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close(); - while (!ZKTableStateClientSideReader.isEnabledTable(zkw, - TableName.valueOf("testMasterAdmin"))) { + while (!this.admin.isTableEnabled(TableName.valueOf("testMasterAdmin"))) { Thread.sleep(10); } this.admin.disableTable(tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 4acece3e233..93f868c0865 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -53,15 +53,14 @@ import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; -import org.apache.hadoop.hbase.master.RegionState.State; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -127,7 +126,8 @@ public class TestAssignmentManagerOnCluster { } RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", metaState.getState(), State.OPEN); + assertEquals("Meta should be not in transition", + metaState.getState(), RegionState.State.OPEN); assertNotEquals("Meta should be moved off master", metaState.getServerName(), master.getServerName()); assertEquals("Meta should be on the meta server", @@ -153,7 +153,8 @@ public class TestAssignmentManagerOnCluster { regionStates.isRegionOnline(HRegionInfo.FIRST_META_REGIONINFO)); // Now, make sure meta is registered in zk metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper()); - assertEquals("Meta should be not in transition", metaState.getState(), State.OPEN); + assertEquals("Meta should be not in transition", + metaState.getState(), RegionState.State.OPEN); assertEquals("Meta should be assigned", metaState.getServerName(), regionStates.getRegionServerOfRegion(HRegionInfo.FIRST_META_REGIONINFO)); assertNotEquals("Meta should be assigned on a different server", @@ -209,7 +210,8 @@ public class TestAssignmentManagerOnCluster { String table = "testAssignRegionOnRestartedServer"; 
TEST_UTIL.getMiniHBaseCluster().getConf().setInt("hbase.assignment.maximum.attempts", 20); TEST_UTIL.getMiniHBaseCluster().stopMaster(0); - TEST_UTIL.getMiniHBaseCluster().startMaster(); //restart the master so that conf take into affect + //restart the master so that conf take into affect + TEST_UTIL.getMiniHBaseCluster().startMaster(); ServerName deadServer = null; HMaster master = null; @@ -619,9 +621,9 @@ public class TestAssignmentManagerOnCluster { } } am.regionOffline(hri); - am.getRegionStates().updateRegionState(hri, State.PENDING_OPEN, destServerName); + am.getRegionStates().updateRegionState(hri, RegionState.State.PENDING_OPEN, destServerName); - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLING); + am.getTableStateManager().setTableState(table, TableState.State.DISABLING); List toAssignRegions = am.processServerShutdown(destServerName); assertTrue("Regions to be assigned should be empty.", toAssignRegions.isEmpty()); assertTrue("Regions to be assigned should be empty.", am.getRegionStates() @@ -630,7 +632,7 @@ public class TestAssignmentManagerOnCluster { if (hri != null && serverName != null) { am.regionOnline(hri, serverName); } - am.getTableStateManager().setTableState(table, ZooKeeperProtos.Table.State.DISABLED); + am.getTableStateManager().setTableState(table, TableState.State.DISABLED); TEST_UTIL.deleteTable(table); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 4d590b2cd3a..0103639395a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -55,6 +56,7 @@ import org.apache.hadoop.hbase.client.HConnectionManager; import org.apache.hadoop.hbase.client.HConnectionTestingUtility; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.CoordinatedStateManager; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.executor.ExecutorService; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.master.CatalogJanitor.SplitParentFirstComparator; @@ -299,13 +301,18 @@ public class TestCatalogJanitor { return new TableDescriptors() { @Override public HTableDescriptor remove(TableName tablename) throws IOException { - // TODO Auto-generated method stub + // noop return null; } @Override public Map getAll() throws IOException { - // TODO Auto-generated method stub + // noop + return null; + } + + @Override public Map getAllDescriptors() throws IOException { + // noop return null; } @@ -315,6 +322,12 @@ public class TestCatalogJanitor { return createHTableDescriptor(); } + @Override + public TableDescriptor getDescriptor(TableName tablename) + throws IOException { + return createTableDescriptor(); + } + @Override public Map getByNamespace(String name) throws IOException { return null; @@ -322,8 +335,12 @@ public class TestCatalogJanitor { @Override public void add(HTableDescriptor htd) throws IOException { - // TODO Auto-generated method stub + // 
noop + } + @Override + public void add(TableDescriptor htd) throws IOException { + // noop } }; } @@ -407,6 +424,11 @@ public class TestCatalogJanitor { return null; } + @Override + public TableStateManager getTableStateManager() { + return null; + } + @Override public void dispatchMergingRegions(HRegionInfo region_a, HRegionInfo region_b, boolean forcible) throws IOException { @@ -978,6 +1000,11 @@ public class TestCatalogJanitor { return htd; } + private TableDescriptor createTableDescriptor() { + TableDescriptor htd = new TableDescriptor(createHTableDescriptor(), TableState.State.ENABLED); + return htd; + } + private MultiResponse buildMultiResponse(MultiRequest req) { MultiResponse.Builder builder = MultiResponse.newBuilder(); RegionActionResult.Builder regionActionResultBuilder = diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java index 9feb893c311..6129dd7668c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -82,7 +82,7 @@ public class TestMaster { HTable ht = TEST_UTIL.createTable(TABLENAME, FAMILYNAME); assertTrue(m.assignmentManager.getTableStateManager().isTableState(TABLENAME, - ZooKeeperProtos.Table.State.ENABLED)); + TableState.State.ENABLED)); TEST_UTIL.loadTable(ht, FAMILYNAME, false); ht.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java index d04afdfd427..56961d5a2a9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; +import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.util.Bytes; @@ -95,8 +95,8 @@ public class TestMasterRestartAfterDisablingTable { assertTrue("The table should not be in enabled state", cluster.getMaster() .getAssignmentManager().getTableStateManager().isTableState( - TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.DISABLED, - ZooKeeperProtos.Table.State.DISABLING)); + TableName.valueOf("tableRestart"), TableState.State.DISABLED, + TableState.State.DISABLING)); log("Enabling table\n"); // Need a new Admin, the previous one is on the old master Admin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); @@ -111,7 +111,7 @@ public class TestMasterRestartAfterDisablingTable { 6, 
         6, regions.size());
     assertTrue("The table should be in enabled state", cluster.getMaster()
         .getAssignmentManager().getTableStateManager()
-        .isTableState(TableName.valueOf("tableRestart"), ZooKeeperProtos.Table.State.ENABLED));
+        .isTableState(TableName.valueOf("tableRestart"), TableState.State.ENABLED));
     ht.close();
     TEST_UTIL.shutdownMiniCluster();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 376729b0aba..5d24368df9a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
@@ -393,12 +394,14 @@ public class TestTableLockManager {
     alterThread.start();
     splitThread.start();
+    TEST_UTIL.waitTableEnabled(tableName.toBytes());
 
     while (true) {
       List<HRegionInfo> regions = admin.getTableRegions(tableName);
       LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
       assertEquals(admin.getTableDescriptor(tableName), desc);
       for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        assertEquals(desc, region.getTableDesc());
+        HTableDescriptor regionTableDesc = region.getTableDesc();
+        assertEquals(desc, regionTableDesc);
       }
       if (regions.size() >= 5) {
         break;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
index 3fe397767a8..0d51875eb2e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestTableDescriptorModification.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -154,8 +155,9 @@ public class TestTableDescriptorModification {
     // Verify descriptor from HDFS
     MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
-    htd = FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
-    verifyTableDescriptor(htd, tableName, families);
+    TableDescriptor td =
+        FSTableDescriptors.getTableDescriptorFromFs(mfs.getFileSystem(), tableDir);
+    verifyTableDescriptor(td.getHTableDescriptor(), tableName, families);
   }
 
   private void verifyTableDescriptor(final HTableDescriptor htd,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index c147fd07088..6a9c0dc2358 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.Admin;
@@ -479,7 +480,8 @@ public class SnapshotTestingUtils {
       this.tableRegions = tableRegions;
       this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
       new FSTableDescriptors(conf)
-        .createTableDescriptorForTableDirectory(snapshotDir, htd, false);
+        .createTableDescriptorForTableDirectory(snapshotDir,
+            new TableDescriptor(htd), false);
     }
 
     public HTableDescriptor getTableDescriptor() {
@@ -574,7 +576,8 @@ public class SnapshotTestingUtils {
     private RegionData[] createTable(final HTableDescriptor htd, final int nregions)
         throws IOException {
       Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
-      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir, htd, false);
+      new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(tableDir,
+          new TableDescriptor(htd), false);
 
       assertTrue(nregions % 2 == 0);
       RegionData[] regions = new RegionData[nregions];
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index daf55938d88..839091cb415 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -28,12 +28,13 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Comparator;
-
+import org.apache.hadoop.hbase.client.TableState;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -69,14 +70,15 @@ public class TestFSTableDescriptors {
   public void testCreateAndUpdate() throws IOException {
     Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    assertTrue(fstd.createTableDescriptor(htd));
-    assertFalse(fstd.createTableDescriptor(htd));
+    assertTrue(fstd.createTableDescriptor(td));
+    assertFalse(fstd.createTableDescriptor(td));
     FileStatus [] statuses = fs.listStatus(testdir);
     assertTrue("statuses.length="+statuses.length, statuses.length == 1);
     for (int i = 0; i < 10; i++) {
-      fstd.updateTableDescriptor(htd);
+      fstd.updateTableDescriptor(td);
     }
     statuses = fs.listStatus(testdir);
     assertTrue(statuses.length == 1);
@@ -90,20 +92,29 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo");
     HTableDescriptor htd = new HTableDescriptor(
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
+    TableDescriptor td = new TableDescriptor(htd);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    Path p0 = fstd.updateTableDescriptor(htd);
+    Path p0 = fstd.updateTableDescriptor(td);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
-    Path p1 = fstd.updateTableDescriptor(htd);
+    Path p1 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p0));
     int i1 = FSTableDescriptors.getTableInfoSequenceId(p1);
     assertTrue(i1 == i0 + 1);
-    Path p2 = fstd.updateTableDescriptor(htd);
+    Path p2 = fstd.updateTableDescriptor(td);
     // Assert we cleaned up the old file.
     assertTrue(!fs.exists(p1));
     int i2 = FSTableDescriptors.getTableInfoSequenceId(p2);
     assertTrue(i2 == i1 + 1);
+    td = new TableDescriptor(htd, TableState.State.DISABLED);
+    Path p3 = fstd.updateTableDescriptor(td);
+    // Assert we cleaned up the old file.
+    assertTrue(!fs.exists(p2));
+    int i3 = FSTableDescriptors.getTableInfoSequenceId(p3);
+    assertTrue(i3 == i2 + 1);
+    TableDescriptor descriptor = fstd.getDescriptor(htd.getTableName());
+    assertEquals(descriptor, td);
   }
 
   @Test
@@ -155,12 +166,13 @@ public class TestFSTableDescriptors {
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     Path rootdir = UTIL.getDataTestDir(name);
     FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
-    fstd.createTableDescriptor(htd);
-    HTableDescriptor htd2 =
+    fstd.createTableDescriptor(td);
+    TableDescriptor td2 =
       FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
-    assertTrue(htd.equals(htd2));
+    assertTrue(td.equals(td2));
   }
 
   @Test public void testHTableDescriptors()
@@ -180,7 +192,8 @@ public class TestFSTableDescriptors {
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
-      HTableDescriptor htd = new HTableDescriptor(name + i);
+      TableDescriptor htd = new TableDescriptor(new HTableDescriptor(name + i),
+          TableState.State.ENABLED);
       htds.createTableDescriptor(htd);
     }
 
@@ -194,7 +207,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name + i));
       htd.addFamily(new HColumnDescriptor("" + i));
-      htds.updateTableDescriptor(htd);
+      htds.updateTableDescriptor(new TableDescriptor(htd));
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -277,18 +290,19 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testCreateTableDescriptorUpdatesIfThereExistsAlready");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
         "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
+    TableDescriptor td = new TableDescriptor(htd, TableState.State.ENABLED);
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
-    assertTrue(fstd.createTableDescriptor(htd));
-    assertFalse(fstd.createTableDescriptor(htd));
+    assertTrue(fstd.createTableDescriptor(td));
+    assertFalse(fstd.createTableDescriptor(td));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
-    assertTrue(fstd.createTableDescriptor(htd)); //this will re-create
+    assertTrue(fstd.createTableDescriptor(td)); //this will re-create
     Path tableDir = fstd.getTableDir(htd.getTableName());
     Path tmpTableDir = new Path(tableDir, FSTableDescriptors.TMP_DIR);
     FileStatus[] statuses = fs.listStatus(tmpTableDir);
     assertTrue(statuses.length == 0);
-    assertEquals(htd, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
+    assertEquals(td, FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir));
   }
 
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index 11516deb338..38924e63d0c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -146,7 +147,8 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
+      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(
+          new TableDescriptor(this.desc));
       /*
        * Create the regions we will merge
        */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
deleted file mode 100644
index bb3d3d343a5..00000000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKTableStateManager.java
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.CoordinatedStateException;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableStateManager;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.zookeeper.KeeperException;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
-
-@Category({MiscTests.class, MediumTests.class})
-public class TestZKTableStateManager {
-  private static final Log LOG = LogFactory.getLog(TestZKTableStateManager.class);
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @Test
-  public void testTableStates()
-      throws CoordinatedStateException, IOException, KeeperException, InterruptedException {
-    final TableName name =
-        TableName.valueOf("testDisabled");
-    Abortable abortable = new Abortable() {
-      @Override
-      public void abort(String why, Throwable e) {
-        LOG.info(why, e);
-      }
-
-      @Override
-      public boolean isAborted() {
-        return false;
-      }
-
-    };
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(),
-        name.getNameAsString(), abortable, true);
-    TableStateManager zkt = new ZKTableStateManager(zkw);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLING);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.DISABLED);
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertTrue(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLING);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.getTablesInStates(Table.State.DISABLED).contains(name));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setTableState(name, Table.State.ENABLED);
-    assertTrue(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertTrue(zkt.isTablePresent(name));
-    zkt.setDeletedTable(name);
-    assertFalse(zkt.isTableState(name, Table.State.ENABLED));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED));
-    assertFalse(zkt.isTableState(name, Table.State.ENABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.DISABLING));
-    assertFalse(zkt.isTableState(name, Table.State.DISABLED, Table.State.ENABLING));
-    assertFalse(zkt.isTablePresent(name));
-  }
-}