HBASE-5936 Add Column-level PB-based calls to HMasterInterface; REVERT

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1344449 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2012-05-30 20:26:36 +00:00
parent 267dfbee66
commit 0586d64bd5
10 changed files with 305 additions and 9822 deletions

View File

@ -1130,7 +1130,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* @param cfs
* @return An {@link HColumnDescriptor} made from the passed in <code>cfs</code>
*/
public static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
static HColumnDescriptor convert(final ColumnFamilySchema cfs) {
// Use the empty constructor so we preserve the initial values set on construction for things
// like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for
// unrelated-looking test failures that are hard to trace back to here.
@ -1145,7 +1145,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
/**
* @return Convert this instance to a the pb column family type
*/
public ColumnFamilySchema convert() {
ColumnFamilySchema convert() {
ColumnFamilySchema.Builder builder = ColumnFamilySchema.newBuilder();
builder.setName(ByteString.copyFrom(getName()));
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {

View File

@ -1251,7 +1251,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* @return Convert the current {@link HTableDescriptor} into a pb TableSchema instance.
*/
public TableSchema convert() {
TableSchema convert() {
TableSchema.Builder builder = TableSchema.newBuilder();
builder.setName(ByteString.copyFrom(getName()));
for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e: this.values.entrySet()) {
@ -1270,7 +1270,7 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* @param ts A pb TableSchema instance.
* @return An {@link HTableDescriptor} made from the passed in pb <code>ts</code>.
*/
public static HTableDescriptor convert(final TableSchema ts) {
static HTableDescriptor convert(final TableSchema ts) {
List<ColumnFamilySchema> list = ts.getColumnFamiliesList();
HColumnDescriptor [] hcds = new HColumnDescriptor[list.size()];
int index = 0;

View File

@ -74,19 +74,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterRespo
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
@ -506,9 +493,8 @@ public class HBaseAdmin implements Abortable, Closeable {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
master.createTable(null, request);
public Void call() throws IOException {
master.createTable(desc, splitKeys);
return null;
}
});
@ -539,9 +525,8 @@ public class HBaseAdmin implements Abortable, Closeable {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
master.deleteTable(null,req);
public Void call() throws IOException {
master.deleteTable(tableName);
return null;
}
});
@ -569,21 +554,19 @@ public class HBaseAdmin implements Abortable, Closeable {
// HMaster removes the table from its HTableDescriptors
if (values == null || values.length == 0) {
tableExists = false;
GetTableDescriptorsResponse htds;
HTableDescriptor[] htds;
MasterKeepAliveConnection master = connection.getKeepAliveMaster();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(null);
htds = master.getTableDescriptors(null, req);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
htds = master.getHTableDescriptors();
} finally {
master.close();
}
for (TableSchema ts : htds.getTableSchemaList()) {
if (Bytes.equals(tableName, ts.getName().toByteArray())) {
tableExists = true;
break;
if (htds != null && htds.length > 0) {
for (HTableDescriptor htd: htds) {
if (Bytes.equals(tableName, htd.getName())) {
tableExists = true;
break;
}
}
}
if (!tableExists) {
@ -726,10 +709,9 @@ public class HBaseAdmin implements Abortable, Closeable {
throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
public Void call() throws IOException {
LOG.info("Started enable of " + Bytes.toString(tableName));
EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
master.enableTable(null,req);
master.enableTable(tableName);
return null;
}
});
@ -796,10 +778,9 @@ public class HBaseAdmin implements Abortable, Closeable {
public void disableTableAsync(final byte [] tableName) throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
public Void call() throws IOException {
LOG.info("Started disable of " + Bytes.toString(tableName));
DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
master.disableTable(null,req);
master.disableTable(tableName);
return null;
}
});
@ -967,14 +948,8 @@ public class HBaseAdmin implements Abortable, Closeable {
HTableDescriptor.isLegalTableName(tableName);
return execute(new MasterCallable<Pair<Integer, Integer>>() {
@Override
public Pair<Integer, Integer> call() throws ServiceException {
GetSchemaAlterStatusRequest req =
RequestConverter.buildGetSchemaAlterStatusRequest(tableName);
GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(null,req);
Pair<Integer,Integer> pair =
new Pair<Integer,Integer>(
new Integer(ret.getYetToUpdateRegions()),new Integer(ret.getTotalRegions()));
return pair;
public Pair<Integer, Integer> call() throws IOException {
return master.getAlterStatus(tableName);
}
});
}
@ -1004,9 +979,8 @@ public class HBaseAdmin implements Abortable, Closeable {
throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
master.addColumn(null,req);
public Void call() throws IOException {
master.addColumn(tableName, column);
return null;
}
});
@ -1037,9 +1011,8 @@ public class HBaseAdmin implements Abortable, Closeable {
throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
master.deleteColumn(null,req);
public Void call() throws IOException {
master.deleteColumn(tableName, columnName);
return null;
}
});
@ -1072,9 +1045,8 @@ public class HBaseAdmin implements Abortable, Closeable {
throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
master.modifyColumn(null,req);
public Void call() throws IOException {
master.modifyColumn(tableName, descriptor);
return null;
}
});
@ -1593,9 +1565,8 @@ public class HBaseAdmin implements Abortable, Closeable {
throws IOException {
execute(new MasterCallable<Void>() {
@Override
public Void call() throws ServiceException {
ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
master.modifyTable(null, request);
public Void call() throws IOException {
master.modifyTable(tableName, htd);
return null;
}
});

View File

@ -79,9 +79,6 @@ import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.VersionedProtocol;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Addressing;
@ -2248,11 +2245,7 @@ public class HConnectionManager {
public HTableDescriptor[] listTables() throws IOException {
MasterKeepAliveConnection master = getKeepAliveMaster();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(null);
return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
return master.getHTableDescriptors();
} finally {
master.close();
}
@ -2263,12 +2256,8 @@ public class HConnectionManager {
if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
MasterKeepAliveConnection master = getKeepAliveMaster();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
} finally {
return master.getHTableDescriptors(tableNames);
}finally {
master.close();
}
}
@ -2291,19 +2280,17 @@ public class HConnectionManager {
return HTableDescriptor.META_TABLEDESC;
}
MasterKeepAliveConnection master = getKeepAliveMaster();
GetTableDescriptorsResponse htds;
HTableDescriptor[] htds;
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(null);
htds = master.getTableDescriptors(null, req);
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
} finally {
htds = master.getHTableDescriptors();
}finally {
master.close();
}
for (TableSchema ts : htds.getTableSchemaList()) {
if (Bytes.equals(tableName, ts.getName().toByteArray())) {
return HTableDescriptor.convert(ts);
if (htds != null && htds.length > 0) {
for (HTableDescriptor htd: htds) {
if (Bytes.equals(tableName, htd.getName())) {
return htd;
}
}
}
throw new TableNotFoundException(Bytes.toString(tableName));

View File

@ -27,28 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
@ -114,106 +94,85 @@ public interface HMasterInterface extends VersionedProtocol {
* Creates a new table asynchronously. If splitKeys are specified, then the
* table will be created with an initial set of multiple regions.
* If splitKeys is null, the table will be created with a single region.
* @param controller Unused (set to null).
* @param req CreateTableRequest that contains:<br>
* - tablesSchema: table descriptor<br>
* - splitKeys
* @throws ServiceException
* @param desc table descriptor
* @param splitKeys
* @throws IOException
*/
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException;
public void createTable(HTableDescriptor desc, byte [][] splitKeys)
throws IOException;
/**
* Deletes a table
* @param controller Unused (set to null).
* @param req DeleteTableRequest that contains:<br>
* - tableName: table to delete
* @throws ServiceException
* @param tableName table to delete
* @throws IOException e
*/
public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest req)
throws ServiceException;
public void deleteTable(final byte [] tableName) throws IOException;
/**
* Used by the client to get the number of regions that have received the
* updated schema
*
* @param controller Unused (set to null).
* @param req GetSchemaAlterStatusRequest that contains:<br>
* - tableName
* @return GetSchemaAlterStatusResponse indicating the number of regions updated.
* yetToUpdateRegions is the regions that are yet to be updated totalRegions
* is the total number of regions of the table
* @throws ServiceException
* @param tableName
* @return Pair indicating the number of regions updated Pair.getFirst() is the
* regions that are yet to be updated Pair.getSecond() is the total number
* of regions of the table
* @throws IOException
*/
public GetSchemaAlterStatusResponse getSchemaAlterStatus(
RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException;
public Pair<Integer, Integer> getAlterStatus(byte[] tableName)
throws IOException;
/**
* Adds a column to the specified table
* @param controller Unused (set to null).
* @param req AddColumnRequest that contains:<br>
* - tableName: table to modify<br>
* - column: column descriptor
* @throws ServiceException
* @param tableName table to modify
* @param column column descriptor
* @throws IOException e
*/
public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
throws ServiceException;
public void addColumn(final byte [] tableName, HColumnDescriptor column)
throws IOException;
/**
* Modifies an existing column on the specified table
* @param controller Unused (set to null).
* @param req ModifyColumnRequest that contains:<br>
* - tableName: table name<br>
* - descriptor: new column descriptor
* @param tableName table name
* @param descriptor new column descriptor
* @throws IOException e
*/
public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
throws ServiceException;
public void modifyColumn(final byte [] tableName, HColumnDescriptor descriptor)
throws IOException;
/**
* Deletes a column from the specified table. Table must be disabled.
* @param controller Unused (set to null).
* @param req DeleteColumnRequest that contains:<br>
* - tableName: table to alter<br>
* - columnName: column family to remove
* @throws ServiceException
* @param tableName table to alter
* @param columnName column family to remove
* @throws IOException e
*/
public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
throws ServiceException;
public void deleteColumn(final byte [] tableName, final byte [] columnName)
throws IOException;
/**
* Puts the table on-line (only needed if table has been previously taken offline)
* @param controller Unused (set to null).
* @param req EnableTableRequest that contains:<br>
* - tableName: table to enable
* @throws ServiceException
* @param tableName table to enable
* @throws IOException e
*/
public EnableTableResponse enableTable(RpcController controller, EnableTableRequest req)
throws ServiceException;
public void enableTable(final byte [] tableName) throws IOException;
/**
* Take table offline
*
* @param controller Unused (set to null).
* @param req DisableTableRequest that contains:<br>
* - tableName: table to take offline
* @throws ServiceException
* @param tableName table to take offline
* @throws IOException e
*/
public DisableTableResponse disableTable(RpcController controller, DisableTableRequest req)
throws ServiceException;
public void disableTable(final byte [] tableName) throws IOException;
/**
* Modify a table's metadata
*
* @param controller Unused (set to null).
* @param req ModifyTableRequest that contains:<br>
* - tableName: table to modify<br>
* - tableSchema: new descriptor for table
* @throws ServiceException
* @param tableName table to modify
* @param htd new descriptor for table
* @throws IOException e
*/
public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
throws ServiceException;
public void modifyTable(byte[] tableName, HTableDescriptor htd)
throws IOException;
/**
* Shutdown an HBase cluster.
@ -280,15 +239,17 @@ public interface HMasterInterface extends VersionedProtocol {
throws ServiceException;
/**
* Get list of TableDescriptors for requested tables.
* @param controller Unused (set to null).
* @param req GetTableDescriptorsRequest that contains:<br>
* - tableNames: requested tables, or if empty, all are requested
* @return GetTableDescriptorsResponse
* @throws ServiceException
* Get array of all HTDs.
* @return array of HTableDescriptor
*/
public GetTableDescriptorsResponse getTableDescriptors(
RpcController controller, GetTableDescriptorsRequest req) throws ServiceException;
public HTableDescriptor[] getHTableDescriptors();
/**
* Get array of HTDs for requested tables.
* @param tableNames
* @return array of HTableDescriptor
*/
public HTableDescriptor[] getHTableDescriptors(List<String> tableNames);
/**
* Assign a region to a server chosen at random.
@ -306,7 +267,7 @@ public interface HMasterInterface extends VersionedProtocol {
* back to the same server. Use {@link #moveRegion(RpcController,MoveRegionRequest}
* if you want to control the region movement.
* @param controller Unused (set to null).
* @param req The request that contains:<br>
* @param req The request which contains:<br>
* - region: Region to unassign. Will clear any existing RegionPlan
* if one found.<br>
* - force: If true, force unassign (Will remove region from
@ -320,7 +281,7 @@ public interface HMasterInterface extends VersionedProtocol {
/**
* Move a region to a specified destination server.
* @param controller Unused (set to null).
* @param req The request that contains:<br>
* @param req The request which contains:<br>
* - region: The encoded region name; i.e. the hash that makes
* up the region name suffix: e.g. if regionname is
* <code>TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396.</code>,
@ -333,5 +294,5 @@ public interface HMasterInterface extends VersionedProtocol {
* region named <code>encodedRegionName</code>
*/
public MoveRegionResponse moveRegion(RpcController controller, MoveRegionRequest req)
throws ServiceException;
throws ServiceException;
}

View File

@ -123,28 +123,8 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Re
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
@ -1025,7 +1005,7 @@ Server {
resp.addMapEntries(entry.build());
return resp.build();
} catch (IOException ioe) {
} catch(IOException ioe) {
throw new ServiceException(ioe);
}
}
@ -1070,7 +1050,7 @@ Server {
// Up our metrics.
this.metrics.incrementRequests(sl.getTotalNumberOfRequests());
}
} catch (IOException ioe) {
} catch(IOException ioe) {
throw new ServiceException(ioe);
}
@ -1314,7 +1294,6 @@ Server {
return mrr;
}
@Override
public void createTable(HTableDescriptor hTableDescriptor,
byte [][] splitKeys)
throws IOException {
@ -1331,23 +1310,10 @@ Server {
this.executorService.submit(new CreateTableHandler(this,
this.fileSystemManager, this.serverManager, hTableDescriptor, conf,
newRegions, catalogTracker, assignmentManager));
if (cpHost != null) {
cpHost.postCreateTable(hTableDescriptor, newRegions);
}
}
@Override
public CreateTableResponse createTable(RpcController controller, CreateTableRequest req)
throws ServiceException {
HTableDescriptor hTableDescriptor = HTableDescriptor.convert(req.getTableSchema());
byte [][] splitKeys = ProtobufUtil.getSplitKeysArray(req);
try {
createTable(hTableDescriptor,splitKeys);
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
return CreateTableResponse.newBuilder().build();
}
private HRegionInfo[] getHRegionInfos(HTableDescriptor hTableDescriptor,
@ -1377,23 +1343,15 @@ Server {
}
@Override
public DeleteTableResponse deleteTable(RpcController controller, DeleteTableRequest request)
throws ServiceException {
byte [] tableName = request.getTableName().toByteArray();
try {
checkInitialized();
if (cpHost != null) {
cpHost.preDeleteTable(tableName);
}
this.executorService.submit(new DeleteTableHandler(tableName, this, this));
if (cpHost != null) {
cpHost.postDeleteTable(tableName);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
public void deleteTable(final byte [] tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDeleteTable(tableName);
}
this.executorService.submit(new DeleteTableHandler(tableName, this, this));
if (cpHost != null) {
cpHost.postDeleteTable(tableName);
}
return DeleteTableResponse.newBuilder().build();
}
/**
@ -1404,132 +1362,81 @@ Server {
* of regions of the table
* @throws IOException
*/
@Override
public GetSchemaAlterStatusResponse getSchemaAlterStatus(
RpcController controller, GetSchemaAlterStatusRequest req) throws ServiceException {
public Pair<Integer, Integer> getAlterStatus(byte[] tableName)
throws IOException {
// TODO: currently, we query using the table name on the client side. this
// may overlap with other table operations or the table operation may
// have completed before querying this API. We need to refactor to a
// transaction system in the future to avoid these ambiguities.
byte [] tableName = req.getTableName().toByteArray();
return this.assignmentManager.getReopenStatus(tableName);
}
try {
Pair<Integer,Integer> pair = this.assignmentManager.getReopenStatus(tableName);
GetSchemaAlterStatusResponse.Builder ret = GetSchemaAlterStatusResponse.newBuilder();
ret.setYetToUpdateRegions(pair.getFirst());
ret.setTotalRegions(pair.getSecond());
return ret.build();
} catch (IOException ioe) {
throw new ServiceException(ioe);
public void addColumn(byte [] tableName, HColumnDescriptor column)
throws IOException {
checkInitialized();
if (cpHost != null) {
if (cpHost.preAddColumn(tableName, column)) {
return;
}
}
new TableAddFamilyHandler(tableName, column, this, this).process();
if (cpHost != null) {
cpHost.postAddColumn(tableName, column);
}
}
public AddColumnResponse addColumn(RpcController controller, AddColumnRequest req)
throws ServiceException {
byte [] tableName = req.getTableName().toByteArray();
HColumnDescriptor column = HColumnDescriptor.convert(req.getColumnFamilies());
try {
checkInitialized();
if (cpHost != null) {
if (cpHost.preAddColumn(tableName, column)) {
return AddColumnResponse.newBuilder().build();
}
public void modifyColumn(byte [] tableName, HColumnDescriptor descriptor)
throws IOException {
checkInitialized();
if (cpHost != null) {
if (cpHost.preModifyColumn(tableName, descriptor)) {
return;
}
new TableAddFamilyHandler(tableName, column, this, this).process();
if (cpHost != null) {
cpHost.postAddColumn(tableName, column);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
return AddColumnResponse.newBuilder().build();
new TableModifyFamilyHandler(tableName, descriptor, this, this).process();
if (cpHost != null) {
cpHost.postModifyColumn(tableName, descriptor);
}
}
public ModifyColumnResponse modifyColumn(RpcController controller, ModifyColumnRequest req)
throws ServiceException {
byte [] tableName = req.getTableName().toByteArray();
HColumnDescriptor descriptor = HColumnDescriptor.convert(req.getColumnFamilies());
try {
checkInitialized();
if (cpHost != null) {
if (cpHost.preModifyColumn(tableName, descriptor)) {
return ModifyColumnResponse.newBuilder().build();
}
public void deleteColumn(final byte [] tableName, final byte [] c)
throws IOException {
checkInitialized();
if (cpHost != null) {
if (cpHost.preDeleteColumn(tableName, c)) {
return;
}
new TableModifyFamilyHandler(tableName, descriptor, this, this).process();
if (cpHost != null) {
cpHost.postModifyColumn(tableName, descriptor);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
}
return ModifyColumnResponse.newBuilder().build();
new TableDeleteFamilyHandler(tableName, c, this, this).process();
if (cpHost != null) {
cpHost.postDeleteColumn(tableName, c);
}
}
@Override
public DeleteColumnResponse deleteColumn(RpcController controller, DeleteColumnRequest req)
throws ServiceException {
final byte [] tableName = req.getTableName().toByteArray();
final byte [] columnName = req.getColumnName().toByteArray();
try {
checkInitialized();
if (cpHost != null) {
if (cpHost.preDeleteColumn(tableName, columnName)) {
return DeleteColumnResponse.newBuilder().build();
}
}
new TableDeleteFamilyHandler(tableName, columnName, this, this).process();
if (cpHost != null) {
cpHost.postDeleteColumn(tableName, columnName);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
public void enableTable(final byte [] tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preEnableTable(tableName);
}
this.executorService.submit(new EnableTableHandler(this, tableName,
catalogTracker, assignmentManager, false));
if (cpHost != null) {
cpHost.postEnableTable(tableName);
}
return DeleteColumnResponse.newBuilder().build();
}
@Override
public EnableTableResponse enableTable(RpcController controller, EnableTableRequest request)
throws ServiceException {
byte [] tableName = request.getTableName().toByteArray();
try {
checkInitialized();
if (cpHost != null) {
cpHost.preEnableTable(tableName);
}
this.executorService.submit(new EnableTableHandler(this, tableName,
catalogTracker, assignmentManager, false));
if (cpHost != null) {
cpHost.postEnableTable(tableName);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
public void disableTable(final byte [] tableName) throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preDisableTable(tableName);
}
return EnableTableResponse.newBuilder().build();
}
this.executorService.submit(new DisableTableHandler(this, tableName,
catalogTracker, assignmentManager, false));
@Override
public DisableTableResponse disableTable(RpcController controller, DisableTableRequest request)
throws ServiceException {
byte [] tableName = request.getTableName().toByteArray();
try {
checkInitialized();
if (cpHost != null) {
cpHost.preDisableTable(tableName);
}
this.executorService.submit(new DisableTableHandler(this, tableName,
catalogTracker, assignmentManager, false));
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
if (cpHost != null) {
cpHost.postDisableTable(tableName);
}
return DisableTableResponse.newBuilder().build();
}
/**
@ -1568,26 +1475,19 @@ Server {
}
@Override
public ModifyTableResponse modifyTable(RpcController controller, ModifyTableRequest req)
throws ServiceException {
final byte [] tableName = req.getTableName().toByteArray();
HTableDescriptor htd = HTableDescriptor.convert(req.getTableSchema());
try {
checkInitialized();
if (cpHost != null) {
cpHost.preModifyTable(tableName, htd);
}
TableEventHandler tblHandle = new ModifyTableHandler(tableName, htd, this, this);
this.executorService.submit(tblHandle);
tblHandle.waitForPersist();
if (cpHost != null) {
cpHost.postModifyTable(tableName, htd);
}
} catch (IOException ioe) {
throw new ServiceException(ioe);
public void modifyTable(final byte[] tableName, HTableDescriptor htd)
throws IOException {
checkInitialized();
if (cpHost != null) {
cpHost.preModifyTable(tableName, htd);
}
TableEventHandler tblHandle = new ModifyTableHandler(tableName, htd, this, this);
this.executorService.submit(tblHandle);
tblHandle.waitForPersist();
if (cpHost != null) {
cpHost.postModifyTable(tableName, htd);
}
return ModifyTableResponse.newBuilder().build();
}
@Override
@ -2019,43 +1919,39 @@ Server {
}
/**
* Get list of TableDescriptors for requested tables.
* @param controller Unused (set to null).
* @param req GetTableDescriptorsRequest that contains:
* - tableNames: requested tables, or if empty, all are requested
* @return GetTableDescriptorsResponse
* @throws ServiceException
* Get HTD array for given tables
* @param tableNames
* @return HTableDescriptor[]
*/
public GetTableDescriptorsResponse getTableDescriptors(
RpcController controller, GetTableDescriptorsRequest req) throws ServiceException {
GetTableDescriptorsResponse.Builder builder = GetTableDescriptorsResponse.newBuilder();
if (req.getTableNamesCount() == 0) {
// request for all TableDescriptors
Map<String, HTableDescriptor> descriptors = null;
public HTableDescriptor[] getHTableDescriptors(List<String> tableNames) {
List<HTableDescriptor> list =
new ArrayList<HTableDescriptor>(tableNames.size());
for (String s: tableNames) {
HTableDescriptor htd = null;
try {
descriptors = this.tableDescriptors.getAll();
htd = this.tableDescriptors.get(s);
} catch (IOException e) {
LOG.warn("Failed getting all descriptors", e);
}
if (descriptors != null) {
for (HTableDescriptor htd : descriptors.values()) {
builder.addTableSchema(htd.convert());
}
LOG.warn("Failed getting descriptor for " + s, e);
}
if (htd == null) continue;
list.add(htd);
}
else {
for (String s: req.getTableNamesList()) {
HTableDescriptor htd = null;
try {
htd = this.tableDescriptors.get(s);
} catch (IOException e) {
LOG.warn("Failed getting descriptor for " + s, e);
}
if (htd == null) continue;
builder.addTableSchema(htd.convert());
}
return list.toArray(new HTableDescriptor [] {});
}
/**
* Get all table descriptors
* @return All descriptors or null if none.
*/
public HTableDescriptor [] getHTableDescriptors() {
Map<String, HTableDescriptor> descriptors = null;
try {
descriptors = this.tableDescriptors.getAll();
} catch (IOException e) {
LOG.warn("Failed getting all descriptors", e);
}
return builder.build();
return descriptors == null?
null: descriptors.values().toArray(new HTableDescriptor [] {});
}
/**

View File

@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Action;
@ -105,8 +104,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@ -292,36 +289,6 @@ public final class ProtobufUtil {
return new ServerName(hostName, port, startCode);
}
/**
 * Convert a GetTableDescriptorsResponse protobuf into an HTableDescriptor array.
 *
 * @param proto the GetTableDescriptorsResponse, may be null
 * @return one HTableDescriptor per TableSchema in the response, or null when
 *   <code>proto</code> is null
 */
public static HTableDescriptor[] getHTableDescriptorArray(GetTableDescriptorsResponse proto) {
  if (proto == null) return null;
  // Hoist the count; the protobuf accessor is stable for the life of the message.
  final int count = proto.getTableSchemaCount();
  HTableDescriptor[] descriptors = new HTableDescriptor[count];
  for (int i = 0; i < count; i++) {
    descriptors[i] = HTableDescriptor.convert(proto.getTableSchema(i));
  }
  return descriptors;
}
/**
 * Extract the split keys of a CreateTableRequest proto as a <code>byte [][]</code>.
 *
 * @param proto the CreateTableRequest
 * @return the split keys, one row per repeated splitKeys entry (empty array when none)
 */
public static byte [][] getSplitKeysArray(final CreateTableRequest proto) {
  final int count = proto.getSplitKeysCount();
  byte [][] splitKeys = new byte[count][];
  for (int i = 0; i < count; i++) {
    // ByteString -> defensive byte[] copy, one per split key.
    splitKeys[i] = proto.getSplitKeys(i).toByteArray();
  }
  return splitKeys;
}
/**
* Convert a protocol buffer Get to a client Get
*

View File

@ -26,9 +26,7 @@ import java.util.UUID;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.DeserializationException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Action;
@ -80,17 +78,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.UnlockRowRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
@ -868,51 +856,6 @@ public final class RequestConverter {
return builder.build();
}
/**
 * Create a protocol buffer AddColumnRequest.
 *
 * @param tableName name of the table the column family is added to
 * @param column descriptor of the column family to add
 * @return an AddColumnRequest carrying the table name and family schema
 */
public static AddColumnRequest buildAddColumnRequest(
    final byte [] tableName, final HColumnDescriptor column) {
  // Chained builder form; field order does not affect the built message.
  return AddColumnRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .setColumnFamilies(column.convert())
    .build();
}
/**
 * Create a protocol buffer DeleteColumnRequest.
 *
 * @param tableName name of the table to remove the column family from
 * @param columnName name of the column family to delete
 * @return a DeleteColumnRequest for the given table/family pair
 */
public static DeleteColumnRequest buildDeleteColumnRequest(
    final byte [] tableName, final byte [] columnName) {
  return DeleteColumnRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .setColumnName(ByteString.copyFrom(columnName))
    .build();
}
/**
 * Create a protocol buffer ModifyColumnRequest.
 *
 * @param tableName name of the table whose column family is modified
 * @param column the new descriptor for the column family
 * @return a ModifyColumnRequest carrying the table name and updated family schema
 */
public static ModifyColumnRequest buildModifyColumnRequest(
    final byte [] tableName, final HColumnDescriptor column) {
  return ModifyColumnRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .setColumnFamilies(column.convert())
    .build();
}
/**
* Create a protocol buffer MoveRegionRequest
*
@ -937,7 +880,7 @@ public final class RequestConverter {
* Create a protocol buffer AssignRegionRequest
*
* @param regionName
* @return an AssignRegionRequest
* @return An AssignRegionRequest
*/
public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) {
AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder();
@ -950,7 +893,7 @@ public final class RequestConverter {
*
* @param regionName
* @param force
* @return an UnassignRegionRequest
* @return An UnassignRegionRequest
*/
public static UnassignRegionRequest buildUnassignRegionRequest(
final byte [] regionName, final boolean force) {
@ -960,106 +903,6 @@ public final class RequestConverter {
return builder.build();
}
/**
 * Creates a protocol buffer DeleteTableRequest.
 *
 * @param tableName name of the table to delete
 * @return a DeleteTableRequest for the named table
 */
public static DeleteTableRequest buildDeleteTableRequest(final byte [] tableName) {
  return DeleteTableRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .build();
}
/**
 * Creates a protocol buffer EnableTableRequest.
 *
 * @param tableName name of the table to bring on-line
 * @return an EnableTableRequest for the named table
 */
public static EnableTableRequest buildEnableTableRequest(final byte [] tableName) {
  return EnableTableRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .build();
}
/**
 * Creates a protocol buffer DisableTableRequest.
 *
 * @param tableName name of the table to take off-line
 * @return a DisableTableRequest for the named table
 */
public static DisableTableRequest buildDisableTableRequest(final byte [] tableName) {
  return DisableTableRequest.newBuilder()
    .setTableName(ByteString.copyFrom(tableName))
    .build();
}
/**
 * Creates a protocol buffer CreateTableRequest.
 *
 * @param hTableDesc descriptor of the table to create
 * @param splitKeys initial region split points; null means a single region
 * @return a CreateTableRequest carrying the schema and any split keys
 */
public static CreateTableRequest buildCreateTableRequest(
    final HTableDescriptor hTableDesc, final byte [][] splitKeys) {
  CreateTableRequest.Builder builder =
    CreateTableRequest.newBuilder().setTableSchema(hTableDesc.convert());
  if (splitKeys != null) {
    // Preserve caller-supplied order; each key becomes one repeated entry.
    for (int i = 0; i < splitKeys.length; i++) {
      builder.addSplitKeys(ByteString.copyFrom(splitKeys[i]));
    }
  }
  return builder.build();
}
/**
 * Creates a protocol buffer ModifyTableRequest.
 *
 * @param table name of the table being modified
 * @param hTableDesc the replacement table descriptor
 * @return a ModifyTableRequest carrying the table name and new schema
 */
public static ModifyTableRequest buildModifyTableRequest(
    final byte [] table, final HTableDescriptor hTableDesc) {
  return ModifyTableRequest.newBuilder()
    .setTableName(ByteString.copyFrom(table))
    .setTableSchema(hTableDesc.convert())
    .build();
}
/**
 * Creates a protocol buffer GetSchemaAlterStatusRequest
 *
 * @param table name of the table whose schema-alter progress is wanted
 * @return a GetSchemaAlterStatusRequest
 */
public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest(final byte [] table) {
GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder();
builder.setTableName(ByteString.copyFrom(table));
return builder.build();
}
/**
 * Creates a protocol buffer GetTableDescriptorsRequest.
 *
 * @param tableNames requested table names; null (or empty) asks for all tables
 * @return a GetTableDescriptorsRequest listing the requested names
 */
public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(
    final List<String> tableNames) {
  GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder();
  if (tableNames != null) {
    // addAll appends every element in iteration order, same as adding one by one.
    builder.addAllTableNames(tableNames);
  }
  return builder.build();
}
/**
* Creates a protocol buffer IsMasterRunningRequest
*

View File

@ -26,32 +26,6 @@ option optimize_for = SPEED;
import "hbase.proto";
/* Column-level protobufs */
// Adds a column family to an existing table.
message AddColumnRequest {
  required bytes tableName = 1;
  required ColumnFamilySchema columnFamilies = 2;
}

// Empty acknowledgement for addColumn.
message AddColumnResponse {
}

// Removes a column family from a table (table must be disabled).
message DeleteColumnRequest {
  required bytes tableName = 1;
  required bytes columnName = 2;
}

// Empty acknowledgement for deleteColumn.
message DeleteColumnResponse {
}

// Replaces an existing column family's schema on a table.
message ModifyColumnRequest {
  required bytes tableName = 1;
  required ColumnFamilySchema columnFamilies = 2;
}

// Empty acknowledgement for modifyColumn.
message ModifyColumnResponse {
}
/* Region-level Protos */
message MoveRegionRequest {
@ -77,45 +51,6 @@ message UnassignRegionRequest {
message UnassignRegionResponse {
}
/* Table-level protobufs */
message CreateTableRequest {
required TableSchema tableSchema = 1;
repeated bytes splitKeys = 2;
}
message CreateTableResponse {
}
message DeleteTableRequest {
required bytes tableName = 1;
}
message DeleteTableResponse {
}
message EnableTableRequest {
required bytes tableName = 1;
}
message EnableTableResponse {
}
message DisableTableRequest {
required bytes tableName = 1;
}
message DisableTableResponse {
}
message ModifyTableRequest {
required bytes tableName = 1;
required TableSchema tableSchema = 2;
}
message ModifyTableResponse {
}
/* Cluster-level protobufs */
message IsMasterRunningRequest {
@ -153,36 +88,7 @@ message SetBalancerRunningResponse {
optional bool prevBalanceValue = 1;
}
// Asks how far a schema change has propagated for the named table.
message GetSchemaAlterStatusRequest {
  required bytes tableName = 1;
}

// Progress of a schema change: regions still pending vs. total.
message GetSchemaAlterStatusResponse {
  optional uint32 yetToUpdateRegions = 1;
  optional uint32 totalRegions = 2;
}

// Requests table descriptors; an empty tableNames list means "all tables".
message GetTableDescriptorsRequest {
  repeated string tableNames = 1;
}

// One TableSchema per resolved table name.
message GetTableDescriptorsResponse {
  repeated TableSchema tableSchema = 1;
}
service MasterService {
/** Adds a column to the specified table. */
rpc addColumn(AddColumnRequest)
returns(AddColumnResponse);
/** Deletes a column from the specified table. Table must be disabled. */
rpc deleteColumn(DeleteColumnRequest)
returns(DeleteColumnResponse);
/** Modifies an existing column on the specified table. */
rpc modifyColumn(ModifyColumnRequest)
returns(ModifyColumnResponse);
/** Move the region region to the destination server. */
rpc moveRegion(MoveRegionRequest)
returns(MoveRegionResponse);
@ -200,26 +106,6 @@ service MasterService {
rpc unassignRegion(UnassignRegionRequest)
returns(UnassignRegionResponse);
/** Deletes a table */
rpc deleteTable(DeleteTableRequest)
returns(DeleteTableResponse);
/** Puts the table on-line (only needed if table has been previously taken offline) */
rpc enableTable(EnableTableRequest)
returns(EnableTableResponse);
/** Take table offline */
rpc disableTable(DisableTableRequest)
returns(DisableTableResponse);
/** Modify a table's metadata */
rpc modifyTable(ModifyTableRequest)
returns(ModifyTableResponse);
/** Creates a new table asynchronously */
rpc createTable(CreateTableRequest)
returns(CreateTableResponse);
/** return true if master is available */
rpc isMasterRunning(IsMasterRunningRequest)
returns(IsMasterRunningResponse);
@ -246,12 +132,4 @@ service MasterService {
*/
rpc setBalancerRunning(SetBalancerRunningRequest)
returns(SetBalancerRunningResponse);
/** Used by the client to get the number of regions that have received the updated schema */
rpc getSchemaAlterStatus(GetSchemaAlterStatusRequest)
returns(GetSchemaAlterStatusResponse);
/** Get list of TableDescriptors for requested tables. */
rpc getTableDescriptors(GetTableDescriptorsRequest)
returns(GetTableDescriptorsResponse);
}