HBASE-9677 Remove MasterAdmin and MasterMonitor protos; have MasterService provide these functions

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1527719 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-09-30 19:05:58 +00:00
parent ba2e21a4e7
commit dc959eadc2
29 changed files with 41496 additions and 42413 deletions
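Illustrative only, not part of the patch: after this change the public HBaseAdmin API is unchanged, but both "monitor" style calls (cluster status, schema alter status) and "admin" style calls (table, snapshot, namespace operations) are served by the single MasterService endpoint. A minimal smoke-test sketch against a 0.96-era cluster; the configuration is picked up from hbase-site.xml on the classpath.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class MasterServiceSmokeTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // Formerly backed by MasterMonitorService.getClusterStatus; now MasterService.
      ClusterStatus status = admin.getClusterStatus();
      System.out.println("Live servers: " + status.getServersSize());
      // Formerly backed by MasterAdminService.getCompletedSnapshots; now MasterService.
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      System.out.println("Completed snapshots: " + snapshots.size());
    } finally {
      admin.close();
    }
  }
}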

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

@ -89,37 +89,43 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@ -585,11 +591,11 @@ public class HBaseAdmin implements Abortable, Closeable {
}
}
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
CreateTableRequest request = RequestConverter.buildCreateTableRequest(desc, splitKeys);
masterAdmin.createTable(null, request);
master.createTable(null, request);
return null;
}
});
@ -614,11 +620,11 @@ public class HBaseAdmin implements Abortable, Closeable {
HRegionLocation firstMetaServer = getFirstMetaServerForTable(tableName);
boolean tableExists = true;
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
DeleteTableRequest req = RequestConverter.buildDeleteTableRequest(tableName);
masterAdmin.deleteTable(null,req);
master.deleteTable(null,req);
return null;
}
});
@ -649,7 +655,7 @@ public class HBaseAdmin implements Abortable, Closeable {
if (values == null || values.length == 0) {
tableExists = false;
GetTableDescriptorsResponse htds;
MasterMonitorKeepAliveConnection master = connection.getKeepAliveMasterMonitorService();
MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableName);
@ -819,12 +825,12 @@ public class HBaseAdmin implements Abortable, Closeable {
public void enableTableAsync(final TableName tableName)
throws IOException {
TableName.isLegalFullyQualifiedTableName(tableName.getName());
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
LOG.info("Started enable of " + tableName);
EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName);
masterAdmin.enableTable(null,req);
master.enableTable(null,req);
return null;
}
});
@ -896,12 +902,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void disableTableAsync(final TableName tableName) throws IOException {
TableName.isLegalFullyQualifiedTableName(tableName.getName());
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
LOG.info("Started disable of " + tableName);
DisableTableRequest req = RequestConverter.buildDisableTableRequest(tableName);
masterAdmin.disableTable(null,req);
master.disableTable(null,req);
return null;
}
});
@ -1115,12 +1121,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public Pair<Integer, Integer> getAlterStatus(final TableName tableName)
throws IOException {
return executeCallable(new MasterMonitorCallable<Pair<Integer, Integer>>(getConnection()) {
return executeCallable(new MasterCallable<Pair<Integer, Integer>>(getConnection()) {
@Override
public Pair<Integer, Integer> call() throws ServiceException {
GetSchemaAlterStatusRequest req = RequestConverter
.buildGetSchemaAlterStatusRequest(tableName);
GetSchemaAlterStatusResponse ret = masterMonitor.getSchemaAlterStatus(null, req);
GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(null, req);
Pair<Integer, Integer> pair = new Pair<Integer, Integer>(Integer.valueOf(ret
.getYetToUpdateRegions()), Integer.valueOf(ret.getTotalRegions()));
return pair;
@ -1182,11 +1188,11 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void addColumn(final TableName tableName, final HColumnDescriptor column)
throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
AddColumnRequest req = RequestConverter.buildAddColumnRequest(tableName, column);
masterAdmin.addColumn(null,req);
master.addColumn(null,req);
return null;
}
});
@ -1228,11 +1234,11 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void deleteColumn(final TableName tableName, final byte [] columnName)
throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
DeleteColumnRequest req = RequestConverter.buildDeleteColumnRequest(tableName, columnName);
masterAdmin.deleteColumn(null,req);
master.deleteColumn(null,req);
return null;
}
});
@ -1276,11 +1282,11 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void modifyColumn(final TableName tableName, final HColumnDescriptor descriptor)
throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
ModifyColumnRequest req = RequestConverter.buildModifyColumnRequest(tableName, descriptor);
masterAdmin.modifyColumn(null,req);
master.modifyColumn(null,req);
return null;
}
});
@ -1658,7 +1664,7 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void move(final byte [] encodedRegionName, final byte [] destServerName)
throws HBaseIOException, MasterNotRunningException, ZooKeeperConnectionException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
MoveRegionRequest request =
RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName);
@ -1686,12 +1692,12 @@ public class HBaseAdmin implements Abortable, Closeable {
public void assign(final byte[] regionName) throws MasterNotRunningException,
ZooKeeperConnectionException, IOException {
final byte[] toBeAssigned = getRegionName(regionName);
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
AssignRegionRequest request =
RequestConverter.buildAssignRegionRequest(toBeAssigned);
masterAdmin.assignRegion(null,request);
master.assignRegion(null,request);
return null;
}
});
@ -1714,12 +1720,12 @@ public class HBaseAdmin implements Abortable, Closeable {
public void unassign(final byte [] regionName, final boolean force)
throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
final byte[] toBeUnassigned = getRegionName(regionName);
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
UnassignRegionRequest request =
RequestConverter.buildUnassignRegionRequest(toBeUnassigned, force);
masterAdmin.unassignRegion(null,request);
master.unassignRegion(null,request);
return null;
}
});
@ -1730,7 +1736,7 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public void offline(final byte [] regionName)
throws IOException {
MasterAdminKeepAliveConnection master = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
try {
master.offlineRegion(null,RequestConverter.buildOfflineRegionRequest(regionName));
} catch (ServiceException se) {
@ -1748,7 +1754,7 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public boolean setBalancerRunning(final boolean on, final boolean synchronous)
throws MasterNotRunningException, ZooKeeperConnectionException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
SetBalancerRunningRequest req =
RequestConverter.buildSetBalancerRunningRequest(on, synchronous);
@ -1778,7 +1784,7 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public boolean balancer()
throws MasterNotRunningException, ZooKeeperConnectionException, ServiceException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.balance(null,RequestConverter.buildBalanceRequest()).getBalancerRan();
} finally {
@ -1795,7 +1801,7 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public boolean enableCatalogJanitor(boolean enable)
throws ServiceException, MasterNotRunningException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.enableCatalogJanitor(null,
RequestConverter.buildEnableCatalogJanitorRequest(enable)).getPrevValue();
@ -1811,7 +1817,7 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws MasterNotRunningException
*/
public int runCatalogScan() throws ServiceException, MasterNotRunningException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.runCatalogScan(null,
RequestConverter.buildCatalogScanRequest()).getScanResult();
@ -1826,7 +1832,7 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws org.apache.hadoop.hbase.MasterNotRunningException
*/
public boolean isCatalogJanitorEnabled() throws ServiceException, MasterNotRunningException {
MasterAdminKeepAliveConnection stub = connection.getKeepAliveMasterAdminService();
MasterKeepAliveConnection stub = connection.getKeepAliveMasterService();
try {
return stub.isCatalogJanitorEnabled(null,
RequestConverter.buildIsCatalogJanitorEnabledRequest()).getValue();
@ -1846,8 +1852,8 @@ public class HBaseAdmin implements Abortable, Closeable {
public void mergeRegions(final byte[] encodedNameOfRegionA,
final byte[] encodedNameOfRegionB, final boolean forcible)
throws IOException {
MasterAdminKeepAliveConnection master = connection
.getKeepAliveMasterAdminService();
MasterKeepAliveConnection master = connection
.getKeepAliveMasterService();
try {
DispatchMergingRegionsRequest request = RequestConverter
.buildDispatchMergingRegionsRequest(encodedNameOfRegionA,
@ -1971,11 +1977,11 @@ public class HBaseAdmin implements Abortable, Closeable {
"' doesn't match with the HTD one: " + htd.getTableName());
}
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
ModifyTableRequest request = RequestConverter.buildModifyTableRequest(tableName, htd);
masterAdmin.modifyTable(null, request);
master.modifyTable(null, request);
return null;
}
});
@ -2084,10 +2090,10 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
public synchronized void shutdown() throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
masterAdmin.shutdown(null,ShutdownRequest.newBuilder().build());
master.shutdown(null,ShutdownRequest.newBuilder().build());
return null;
}
});
@ -2100,10 +2106,10 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
public synchronized void stopMaster() throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
masterAdmin.stopMaster(null,StopMasterRequest.newBuilder().build());
master.stopMaster(null,StopMasterRequest.newBuilder().build());
return null;
}
});
@ -2136,11 +2142,11 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
public ClusterStatus getClusterStatus() throws IOException {
return executeCallable(new MasterMonitorCallable<ClusterStatus>(getConnection()) {
return executeCallable(new MasterCallable<ClusterStatus>(getConnection()) {
@Override
public ClusterStatus call() throws ServiceException {
GetClusterStatusRequest req = RequestConverter.buildGetClusterStatusRequest();
return ClusterStatus.convert(masterMonitor.getClusterStatus(null,req).getClusterStatus());
return ClusterStatus.convert(master.getClusterStatus(null,req).getClusterStatus());
}
});
}
@ -2164,11 +2170,11 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException
*/
public void createNamespace(final NamespaceDescriptor descriptor) throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws Exception {
masterAdmin.createNamespace(null,
MasterAdminProtos.CreateNamespaceRequest.newBuilder()
master.createNamespace(null,
CreateNamespaceRequest.newBuilder()
.setNamespaceDescriptor(ProtobufUtil
.toProtoNamespaceDescriptor(descriptor)).build());
return null;
@ -2182,13 +2188,11 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException
*/
public void modifyNamespace(final NamespaceDescriptor descriptor) throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws Exception {
masterAdmin.modifyNamespace(null,
MasterAdminProtos.ModifyNamespaceRequest.newBuilder()
.setNamespaceDescriptor(ProtobufUtil
.toProtoNamespaceDescriptor(descriptor)).build());
master.modifyNamespace(null, ModifyNamespaceRequest.newBuilder().
setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(descriptor)).build());
return null;
}
});
@ -2200,12 +2204,11 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException
*/
public void deleteNamespace(final String name) throws IOException {
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws Exception {
masterAdmin.deleteNamespace(null,
MasterAdminProtos.DeleteNamespaceRequest.newBuilder()
.setNamespaceName(name).build());
master.deleteNamespace(null, DeleteNamespaceRequest.newBuilder().
setNamespaceName(name).build());
return null;
}
});
@ -2219,13 +2222,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public NamespaceDescriptor getNamespaceDescriptor(final String name) throws IOException {
return
executeCallable(new MasterAdminCallable<NamespaceDescriptor>(getConnection()) {
executeCallable(new MasterCallable<NamespaceDescriptor>(getConnection()) {
@Override
public NamespaceDescriptor call() throws Exception {
return ProtobufUtil.toNamespaceDescriptor(
masterAdmin.getNamespaceDescriptor(null,
MasterAdminProtos.GetNamespaceDescriptorRequest.newBuilder()
.setNamespaceName(name).build()).getNamespaceDescriptor());
master.getNamespaceDescriptor(null, GetNamespaceDescriptorRequest.newBuilder().
setNamespaceName(name).build()).getNamespaceDescriptor());
}
});
}
@ -2237,13 +2239,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public NamespaceDescriptor[] listNamespaceDescriptors() throws IOException {
return
executeCallable(new MasterAdminCallable<NamespaceDescriptor[]>(getConnection()) {
executeCallable(new MasterCallable<NamespaceDescriptor[]>(getConnection()) {
@Override
public NamespaceDescriptor[] call() throws Exception {
List<HBaseProtos.NamespaceDescriptor> list =
masterAdmin.listNamespaceDescriptors(null,
MasterAdminProtos.ListNamespaceDescriptorsRequest.newBuilder().build())
.getNamespaceDescriptorList();
master.listNamespaceDescriptors(null, ListNamespaceDescriptorsRequest.newBuilder().
build()).getNamespaceDescriptorList();
NamespaceDescriptor[] res = new NamespaceDescriptor[list.size()];
for(int i = 0; i < list.size(); i++) {
res[i] = ProtobufUtil.toNamespaceDescriptor(list.get(i));
@ -2261,14 +2262,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public HTableDescriptor[] listTableDescriptorsByNamespace(final String name) throws IOException {
return
executeCallable(new MasterAdminCallable<HTableDescriptor[]>(getConnection()) {
executeCallable(new MasterCallable<HTableDescriptor[]>(getConnection()) {
@Override
public HTableDescriptor[] call() throws Exception {
List<TableSchema> list =
masterAdmin.listTableDescriptorsByNamespace(null,
MasterAdminProtos.ListTableDescriptorsByNamespaceRequest.newBuilder()
.setNamespaceName(name).build())
.getTableSchemaList();
master.listTableDescriptorsByNamespace(null, ListTableDescriptorsByNamespaceRequest.
newBuilder().setNamespaceName(name).build()).getTableSchemaList();
HTableDescriptor[] res = new HTableDescriptor[list.size()];
for(int i=0; i < list.size(); i++) {
@ -2287,13 +2286,12 @@ public class HBaseAdmin implements Abortable, Closeable {
*/
public TableName[] listTableNamesByNamespace(final String name) throws IOException {
return
executeCallable(new MasterAdminCallable<TableName[]>(getConnection()) {
executeCallable(new MasterCallable<TableName[]>(getConnection()) {
@Override
public TableName[] call() throws Exception {
List<HBaseProtos.TableName> tableNames =
masterAdmin.listTableNamesByNamespace(null,
MasterAdminProtos.ListTableNamesByNamespaceRequest.newBuilder()
.setNamespaceName(name).build())
master.listTableNamesByNamespace(null, ListTableNamesByNamespaceRequest.
newBuilder().setNamespaceName(name).build())
.getTableNameList();
TableName[] result = new TableName[tableNames.size()];
for (int i = 0; i < tableNames.size(); i++) {
@ -2704,10 +2702,10 @@ public class HBaseAdmin implements Abortable, Closeable {
Thread.currentThread().interrupt();
}
LOG.debug("Getting current status of snapshot from master...");
done = executeCallable(new MasterAdminCallable<IsSnapshotDoneResponse>(getConnection()) {
done = executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
@Override
public IsSnapshotDoneResponse call() throws ServiceException {
return masterAdmin.isSnapshotDone(null, request);
return master.isSnapshotDone(null, request);
}
});
};
@ -2733,10 +2731,10 @@ public class HBaseAdmin implements Abortable, Closeable {
final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
.build();
// run the snapshot on the master
return executeCallable(new MasterAdminCallable<SnapshotResponse>(getConnection()) {
return executeCallable(new MasterCallable<SnapshotResponse>(getConnection()) {
@Override
public SnapshotResponse call() throws ServiceException {
return masterAdmin.snapshot(null, request);
return master.snapshot(null, request);
}
});
}
@ -2764,10 +2762,10 @@ public class HBaseAdmin implements Abortable, Closeable {
public boolean isSnapshotFinished(final SnapshotDescription snapshot)
throws IOException, HBaseSnapshotException, UnknownSnapshotException {
return executeCallable(new MasterAdminCallable<IsSnapshotDoneResponse>(getConnection()) {
return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection()) {
@Override
public IsSnapshotDoneResponse call() throws ServiceException {
return masterAdmin.isSnapshotDone(null,
return master.isSnapshotDone(null,
IsSnapshotDoneRequest.newBuilder().setSnapshot(snapshot).build());
}
}).getDone();
@ -2940,11 +2938,11 @@ public class HBaseAdmin implements Abortable, Closeable {
Thread.currentThread().interrupt();
}
LOG.debug("Getting current status of snapshot restore from master...");
done = executeCallable(new MasterAdminCallable<IsRestoreSnapshotDoneResponse>(
done = executeCallable(new MasterCallable<IsRestoreSnapshotDoneResponse>(
getConnection()) {
@Override
public IsRestoreSnapshotDoneResponse call() throws ServiceException {
return masterAdmin.isRestoreSnapshotDone(null, request);
return master.isRestoreSnapshotDone(null, request);
}
});
}
@ -2971,10 +2969,10 @@ public class HBaseAdmin implements Abortable, Closeable {
.build();
// run the snapshot restore on the master
return executeCallable(new MasterAdminCallable<RestoreSnapshotResponse>(getConnection()) {
return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
@Override
public RestoreSnapshotResponse call() throws ServiceException {
return masterAdmin.restoreSnapshot(null, request);
return master.restoreSnapshot(null, request);
}
});
}
@ -2985,10 +2983,10 @@ public class HBaseAdmin implements Abortable, Closeable {
* @throws IOException if a network error occurs
*/
public List<SnapshotDescription> listSnapshots() throws IOException {
return executeCallable(new MasterAdminCallable<List<SnapshotDescription>>(getConnection()) {
return executeCallable(new MasterCallable<List<SnapshotDescription>>(getConnection()) {
@Override
public List<SnapshotDescription> call() throws ServiceException {
return masterAdmin.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build())
return master.getCompletedSnapshots(null, GetCompletedSnapshotsRequest.newBuilder().build())
.getSnapshotsList();
}
});
@ -3041,10 +3039,10 @@ public class HBaseAdmin implements Abortable, Closeable {
// make sure the snapshot is possibly valid
TableName.isLegalFullyQualifiedTableName(Bytes.toBytes(snapshotName));
// do the delete
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
masterAdmin.deleteSnapshot(null,
master.deleteSnapshot(null,
DeleteSnapshotRequest.newBuilder().
setSnapshot(SnapshotDescription.newBuilder().setName(snapshotName).build()).build());
return null;
@ -3070,10 +3068,10 @@ public class HBaseAdmin implements Abortable, Closeable {
List<SnapshotDescription> snapshots = listSnapshots(pattern);
for (final SnapshotDescription snapshot : snapshots) {
// do the delete
executeCallable(new MasterAdminCallable<Void>(getConnection()) {
executeCallable(new MasterCallable<Void>(getConnection()) {
@Override
public Void call() throws ServiceException {
this.masterAdmin.deleteSnapshot(null,
this.master.deleteSnapshot(null,
DeleteSnapshotRequest.newBuilder().setSnapshot(snapshot).build());
return null;
}
@ -3082,61 +3080,29 @@ public class HBaseAdmin implements Abortable, Closeable {
}
/**
* @see {@link #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)}
*/
abstract static class MasterAdminCallable<V> extends MasterCallable<V> {
protected MasterAdminKeepAliveConnection masterAdmin;
public MasterAdminCallable(final HConnection connection) {
super(connection);
}
@Override
public void prepare(boolean reload) throws IOException {
this.masterAdmin = this.connection.getKeepAliveMasterAdminService();
}
@Override
public void close() throws IOException {
// The above prepare could fail but this would still be called though masterAdmin is null
if (this.masterAdmin != null) this.masterAdmin.close();
}
}
/**
* @see {@link #executeCallable(org.apache.hadoop.hbase.client.HBaseAdmin.MasterCallable)}
*/
abstract static class MasterMonitorCallable<V> extends MasterCallable<V> {
protected MasterMonitorKeepAliveConnection masterMonitor;
public MasterMonitorCallable(final HConnection connection) {
super(connection);
}
@Override
public void prepare(boolean reload) throws IOException {
this.masterMonitor = this.connection.getKeepAliveMasterMonitorService();
}
@Override
public void close() throws IOException {
// The above prepare could fail but this would still be called though masterMonitor is null
if (this.masterMonitor != null) this.masterMonitor.close();
}
}
/**
* Parent of {@link MasterMonitorCallable} and {@link MasterAdminCallable}.
* Base class for the master RPC callables used by this class.
* Has common methods.
* @param <V>
*/
abstract static class MasterCallable<V> implements RetryingCallable<V>, Closeable {
protected HConnection connection;
protected MasterKeepAliveConnection master;
public MasterCallable(final HConnection connection) {
this.connection = connection;
}
@Override
public void prepare(boolean reload) throws IOException {
this.master = this.connection.getKeepAliveMasterService();
}
@Override
public void close() throws IOException {
// The above prepare could fail but this would still be called though master is null
if (this.master != null) this.master.close();
}
@Override
public void throwable(Throwable t, boolean retrying) {
}
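The namespace hunks above drop the MasterAdminProtos request builders in favor of the MasterProtos ones; the public HBaseAdmin namespace API they back is unchanged. A small sketch of that API (the namespace name "example_ns" is illustrative, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class NamespaceRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    try {
      // createNamespace/deleteNamespace now build MasterProtos.*NamespaceRequest
      // messages and call the single master stub, as shown in the hunks above.
      admin.createNamespace(NamespaceDescriptor.create("example_ns").build());
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
      admin.deleteNamespace("example_ns");
    } finally {
      admin.close();
    }
  }
}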

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java

@ -37,15 +37,14 @@ import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
/**
* A cluster connection. Knows how to find the master, locate regions out on the cluster,
* keeps a cache of locations and then knows how to recalibrate after they move.
* keeps a cache of locations and then knows how to re-calibrate after they move.
* {@link HConnectionManager} manages instances of this class. This is NOT a connection to a
* particular server but to all servers in the cluster. An implementation takes care of individual
* connections at a lower level.
* particular server but to all servers in the cluster. Individual connections are managed at a
* lower level.
*
* <p>HConnections are used by {@link HTable} mostly but also by
* {@link HBaseAdmin}, and {@link CatalogTracker}. HConnection instances can be shared. Sharing
@ -350,14 +349,10 @@ public interface HConnection extends Abortable, Closeable {
final boolean offlined) throws IOException;
/**
* Returns a {@link MasterAdminKeepAliveConnection} to the active master
* Returns a {@link MasterKeepAliveConnection} to the active master
*/
MasterAdminService.BlockingInterface getMasterAdmin() throws IOException;
MasterService.BlockingInterface getMaster() throws IOException;
/**
* Returns an {@link MasterMonitorKeepAliveConnection} to the active master
*/
MasterMonitorService.BlockingInterface getMasterMonitor() throws IOException;
/**
* Establishes a connection to the region server at the specified address.
@ -502,27 +497,20 @@ public interface HConnection extends Abortable, Closeable {
void clearCaches(final ServerName sn);
/**
* This function allows HBaseAdmin and potentially others to get a shared MasterMonitor
* This function allows HBaseAdmin and potentially others to get a shared MasterService
* connection.
* @return The shared instance. Never returns null.
* @throws MasterNotRunningException
* @deprecated Since 0.96.0
*/
// TODO: Why is this in the public interface when the returned type is shutdown package access?
MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService()
@Deprecated
MasterKeepAliveConnection getKeepAliveMasterService()
throws MasterNotRunningException;
/**
* This function allows HBaseAdmin and potentially others to get a shared MasterAdminProtocol
* connection.
* @return The shared instance. Never returns null.
* @throws MasterNotRunningException
*/
// TODO: Why is this in the public interface when the returned type is shutdown package access?
MasterAdminKeepAliveConnection getKeepAliveMasterAdminService() throws MasterNotRunningException;
/**
* @param serverName
* @return true if the server is known as dead, false otherwise.
*/
boolean isDeadServer(ServerName serverName);
}
}

hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java

@ -72,85 +72,43 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@ -170,16 +128,18 @@ import com.google.protobuf.ServiceException;
/**
* A non-instantiable class that manages creation of {@link HConnection}s.
* <p>The simplest way to use this class is by using {@link #createConnection(Configuration)}.
* This creates a new {@link HConnection} that is managed by the caller.
* This creates a new {@link HConnection} to the cluster that is managed by the caller.
* From this {@link HConnection} {@link HTableInterface} implementations are retrieved
* with {@link HConnection#getTable(byte[])}. Example:
* <pre>
* {@code
* HConnection connection = HConnectionManager.createConnection(config);
* HTableInterface table = connection.getTable("table1");
* // use the table as needed, for a single operation and a single thread
* table.close();
* connection.close();
* try {
* // Use the table as needed, for a single operation and a single thread
* } finally {
* table.close();
* connection.close();
* }
* </pre>
* <p>The following logic and API will be removed in the future:
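The abbreviated javadoc example above expands to roughly the following self-contained sketch; the table name "table1" and row key "row1" are placeholders, and the table is assumed to already exist.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class SingleGetExample {
  public static void main(String[] args) throws Exception {
    Configuration config = HBaseConfiguration.create();
    HConnection connection = HConnectionManager.createConnection(config);
    HTableInterface table = connection.getTable("table1");
    try {
      // Use the table as needed, for a single operation and a single thread.
      Result r = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(r.isEmpty() ? "row1 not found" : r.toString());
    } finally {
      table.close();
      connection.close();
    }
  }
}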
@ -276,7 +236,6 @@ public class HConnectionManager {
* @throws ZooKeeperConnectionException
*/
@Deprecated
@SuppressWarnings("resource")
public static HConnection getConnection(final Configuration conf)
throws IOException {
HConnectionKey connectionKey = new HConnectionKey(conf);
@ -875,12 +834,8 @@ public class HConnectionManager {
// When getting the master connection, we check it's running,
// so if there is no exception, it means we've been able to get a
// connection on a running master
MasterMonitorKeepAliveConnection m = getKeepAliveMasterMonitorService();
try {
m.close();
} catch (IOException e) {
throw new MasterNotRunningException("Failed close", e);
}
MasterKeepAliveConnection m = getKeepAliveMasterService();
m.close();
return true;
}
@ -1500,10 +1455,11 @@ public class HConnectionManager {
new ConcurrentHashMap<String, String>();
/**
* Maintains current state of MasterService instance.
* State of the MasterService connection/setup.
*/
static abstract class MasterServiceState {
static class MasterServiceState {
HConnection connection;
MasterService.BlockingInterface stub;
int userCount;
long keepAliveUntil = Long.MAX_VALUE;
@ -1512,70 +1468,21 @@ public class HConnectionManager {
this.connection = connection;
}
abstract Object getStub();
abstract void clearStub();
abstract boolean isMasterRunning() throws ServiceException;
}
/**
* State of the MasterAdminService connection/setup.
*/
static class MasterAdminServiceState extends MasterServiceState {
MasterAdminService.BlockingInterface stub;
MasterAdminServiceState(final HConnection connection) {
super(connection);
}
@Override
public String toString() {
return "MasterAdminService";
return "MasterService";
}
@Override
Object getStub() {
return this.stub;
}
@Override
void clearStub() {
this.stub = null;
}
@Override
boolean isMasterRunning() throws ServiceException {
MasterProtos.IsMasterRunningResponse response =
this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest());
return response != null? response.getIsMasterRunning(): false;
}
}
/**
* State of the MasterMonitorService connection/setup.
*/
static class MasterMonitorServiceState extends MasterServiceState {
MasterMonitorService.BlockingInterface stub;
MasterMonitorServiceState(final HConnection connection) {
super(connection);
}
@Override
public String toString() {
return "MasterMonitorService";
}
@Override
Object getStub() {
return this.stub;
}
@Override
void clearStub() {
this.stub = null;
}
@Override
boolean isMasterRunning() throws ServiceException {
MasterProtos.IsMasterRunningResponse response =
IsMasterRunningResponse response =
this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest());
return response != null? response.getIsMasterRunning(): false;
}
@ -1709,24 +1616,24 @@ public class HConnectionManager {
}
/**
* Class to make a MasterMonitorService stub.
* Class to make a MasterService stub.
*/
class MasterMonitorServiceStubMaker extends StubMaker {
private MasterMonitorService.BlockingInterface stub;
class MasterServiceStubMaker extends StubMaker {
private MasterService.BlockingInterface stub;
@Override
protected String getServiceName() {
return MasterMonitorService.getDescriptor().getName();
return MasterService.getDescriptor().getName();
}
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SWL_SLEEP_WITH_LOCK_HELD")
MasterMonitorService.BlockingInterface makeStub() throws MasterNotRunningException {
return (MasterMonitorService.BlockingInterface)super.makeStub();
MasterService.BlockingInterface makeStub() throws MasterNotRunningException {
return (MasterService.BlockingInterface)super.makeStub();
}
@Override
protected Object makeStub(BlockingRpcChannel channel) {
this.stub = MasterMonitorService.newBlockingStub(channel);
this.stub = MasterService.newBlockingStub(channel);
return this.stub;
}
@ -1736,35 +1643,6 @@ public class HConnectionManager {
}
}
/**
* Class to make a MasterAdminService stub.
*/
class MasterAdminServiceStubMaker extends StubMaker {
private MasterAdminService.BlockingInterface stub;
@Override
protected String getServiceName() {
return MasterAdminService.getDescriptor().getName();
}
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings("SWL_SLEEP_WITH_LOCK_HELD")
MasterAdminService.BlockingInterface makeStub() throws MasterNotRunningException {
return (MasterAdminService.BlockingInterface)super.makeStub();
}
@Override
protected Object makeStub(BlockingRpcChannel channel) {
this.stub = MasterAdminService.newBlockingStub(channel);
return this.stub;
}
@Override
protected void isMasterRunning() throws ServiceException {
this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest());
}
};
@Override
public AdminService.BlockingInterface getAdmin(final ServerName serverName)
throws IOException {
@ -1911,8 +1789,8 @@ public class HConnectionManager {
hci.keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
}
}
closeMasterProtocol(hci.adminMasterServiceState);
closeMasterProtocol(hci.monitorMasterServiceState);
closeMasterProtocol(hci.masterServiceState);
closeMasterProtocol(hci.masterServiceState);
}
}
@ -1940,19 +1818,11 @@ public class HConnectionManager {
}
}
final MasterAdminServiceState adminMasterServiceState = new MasterAdminServiceState(this);
final MasterMonitorServiceState monitorMasterServiceState =
new MasterMonitorServiceState(this);
final MasterServiceState masterServiceState = new MasterServiceState(this);
@Override
public MasterAdminService.BlockingInterface getMasterAdmin() throws MasterNotRunningException {
return getKeepAliveMasterAdminService();
}
@Override
public MasterMonitorService.BlockingInterface getMasterMonitor()
throws MasterNotRunningException {
return getKeepAliveMasterMonitorService();
public MasterService.BlockingInterface getMaster() throws MasterNotRunningException {
return getKeepAliveMasterService();
}
private void resetMasterServiceState(final MasterServiceState mss) {
@ -1961,34 +1831,36 @@ public class HConnectionManager {
}
@Override
public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService()
public MasterKeepAliveConnection getKeepAliveMasterService()
throws MasterNotRunningException {
synchronized (masterAndZKLock) {
if (!isKeepAliveMasterConnectedAndRunning(this.adminMasterServiceState)) {
MasterAdminServiceStubMaker stubMaker = new MasterAdminServiceStubMaker();
this.adminMasterServiceState.stub = stubMaker.makeStub();
if (!isKeepAliveMasterConnectedAndRunning(this.masterServiceState)) {
MasterServiceStubMaker stubMaker = new MasterServiceStubMaker();
this.masterServiceState.stub = stubMaker.makeStub();
}
resetMasterServiceState(this.adminMasterServiceState);
resetMasterServiceState(this.masterServiceState);
}
// Ugly delegation just so we can add in a Close method.
final MasterAdminService.BlockingInterface stub = this.adminMasterServiceState.stub;
return new MasterAdminKeepAliveConnection() {
MasterAdminServiceState mss = adminMasterServiceState;
final MasterService.BlockingInterface stub = this.masterServiceState.stub;
return new MasterKeepAliveConnection() {
MasterServiceState mss = masterServiceState;
@Override
public AddColumnResponse addColumn(RpcController controller,
AddColumnRequest request) throws ServiceException {
public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request)
throws ServiceException {
return stub.addColumn(controller, request);
}
@Override
public DeleteColumnResponse deleteColumn(RpcController controller,
DeleteColumnRequest request) throws ServiceException {
DeleteColumnRequest request)
throws ServiceException {
return stub.deleteColumn(controller, request);
}
@Override
public ModifyColumnResponse modifyColumn(RpcController controller,
ModifyColumnRequest request) throws ServiceException {
ModifyColumnRequest request)
throws ServiceException {
return stub.modifyColumn(controller, request);
}
@ -2152,7 +2024,9 @@ public class HConnectionManager {
}
@Override
public ModifyNamespaceResponse modifyNamespace(RpcController controller, ModifyNamespaceRequest request) throws ServiceException {
public ModifyNamespaceResponse modifyNamespace(RpcController controller,
ModifyNamespaceRequest request)
throws ServiceException {
return stub.modifyNamespace(controller, request);
}
@ -2191,29 +2065,7 @@ public class HConnectionManager {
public void close() {
release(this.mss);
}
};
}
private static void release(MasterServiceState mss) {
if (mss != null && mss.connection != null) {
((HConnectionImplementation)mss.connection).releaseMaster(mss);
}
}
@Override
public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService()
throws MasterNotRunningException {
synchronized (masterAndZKLock) {
if (!isKeepAliveMasterConnectedAndRunning(this.monitorMasterServiceState)) {
MasterMonitorServiceStubMaker stubMaker = new MasterMonitorServiceStubMaker();
this.monitorMasterServiceState.stub = stubMaker.makeStub();
}
resetMasterServiceState(this.monitorMasterServiceState);
}
// Ugly delegation just so can implement close
final MasterMonitorService.BlockingInterface stub = this.monitorMasterServiceState.stub;
return new MasterMonitorKeepAliveConnection() {
final MasterMonitorServiceState mss = monitorMasterServiceState;
@Override
public GetSchemaAlterStatusResponse getSchemaAlterStatus(
RpcController controller, GetSchemaAlterStatusRequest request)
@ -2241,20 +2093,15 @@ public class HConnectionManager {
throws ServiceException {
return stub.getClusterStatus(controller, request);
}
@Override
public IsMasterRunningResponse isMasterRunning(
RpcController controller, IsMasterRunningRequest request)
throws ServiceException {
return stub.isMasterRunning(controller, request);
}
@Override
public void close() throws IOException {
release(this.mss);
}
};
}
private static void release(MasterServiceState mss) {
if (mss != null && mss.connection != null) {
((HConnectionImplementation)mss.connection).releaseMaster(mss);
}
}
private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) {
if (mss.getStub() == null){
@ -2297,8 +2144,7 @@ public class HConnectionManager {
*/
private void closeMaster() {
synchronized (masterAndZKLock) {
closeMasterService(adminMasterServiceState);
closeMasterService(monitorMasterServiceState);
closeMasterService(masterServiceState);
}
}
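The listTables()/listTableNames()/getHTableDescriptor() hunks that follow all use the same internal pattern against the keep-alive handle. Distilled into one helper, this is a sketch only: it assumes the code lives in the org.apache.hadoop.hbase.client package (MasterKeepAliveConnection is package-private) and that org.apache.hadoop.hbase.protobuf.RequestConverter is imported.

// Obtain the shared keep-alive MasterService stub, use it, then release it.
// close() here only releases the shared stub (decrementing the user count, per
// the delegation code above); the underlying connection is kept alive for reuse.
static boolean pingMaster(HConnection connection) throws Exception {
  MasterKeepAliveConnection master = connection.getKeepAliveMasterService();
  try {
    return master.isMasterRunning(null,
        RequestConverter.buildIsMasterRunningRequest()).getIsMasterRunning();
  } finally {
    master.close();
  }
}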
@ -2683,7 +2529,7 @@ public class HConnectionManager {
@Override
public HTableDescriptor[] listTables() throws IOException {
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
MasterKeepAliveConnection master = getKeepAliveMasterService();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
@ -2707,7 +2553,7 @@ public class HConnectionManager {
@Override
public TableName[] listTableNames() throws IOException {
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
MasterKeepAliveConnection master = getKeepAliveMasterService();
try {
return ProtobufUtil.getTableNameArray(master.getTableNames(null,
GetTableNamesRequest.newBuilder().build())
@ -2723,7 +2569,7 @@ public class HConnectionManager {
public HTableDescriptor[] getHTableDescriptorsByTableName(
List<TableName> tableNames) throws IOException {
if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
MasterKeepAliveConnection master = getKeepAliveMasterService();
try {
GetTableDescriptorsRequest req =
RequestConverter.buildGetTableDescriptorsRequest(tableNames);
@ -2760,7 +2606,7 @@ public class HConnectionManager {
if (tableName.equals(TableName.META_TABLE_NAME)) {
return HTableDescriptor.META_TABLEDESC;
}
MasterMonitorKeepAliveConnection master = getKeepAliveMasterMonitorService();
MasterKeepAliveConnection master = getKeepAliveMasterService();
GetTableDescriptorsResponse htds;
try {
GetTableDescriptorsRequest req =

View File

@ -20,7 +20,7 @@
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
/**
* A KeepAlive connection is not physically closed immediately after the close,
@ -30,15 +30,11 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
* client.
*
* <p>This class is intended to be used internally by HBase classes that need to make invocations
* against the master on the MasterAdminProtos.MasterAdminService.BlockingInterface; but not by
* against the master on the MasterProtos.MasterService.BlockingInterface; but not by
* final user code. Hence it's package protected.
*/
interface MasterAdminKeepAliveConnection
extends MasterAdminProtos.MasterAdminService.BlockingInterface {
/**
* Close down all resources.
*/
// The Closeable Interface wants to throw an IOE out of a close.
// Thats a PITA. Do this below instead of Closeable.
interface MasterKeepAliveConnection
extends MasterProtos.MasterService.BlockingInterface {
// Do this instead of implementing Closeable because Closeable returning IOE is a PITA.
void close();
}
}
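A minimal sketch of the call pattern that replaces the old admin/monitor pair (illustrative only; the method and type names are taken from elsewhere in this diff, while the surrounding class, imports and error handling are elided, and the snippet is assumed to sit where getKeepAliveMasterService() is accessible, i.e. inside the client package):
// Illustrative only: one keep-alive connection now serves both admin- and monitor-style calls.
MasterKeepAliveConnection master = getKeepAliveMasterService();
try {
  // Monitor-style call (previously MasterMonitorService): fetch all table descriptors.
  GetTableDescriptorsRequest req =
    RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
  GetTableDescriptorsResponse htds = master.getTableDescriptors(null, req);
  // Admin-style calls (previously MasterAdminService) go through the same stub,
  // e.g. master.enableTable(...), so no second keep-alive connection is needed.
} finally {
  master.close();  // releases the shared keep-alive connection rather than tearing it down
}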

View File

@ -1,39 +0,0 @@
/**
* Copyright The Apache Software Foundation
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.Closeable;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
/**
* A KeepAlive connection is not physically closed immediately after the close,
* but rather kept alive for a few minutes. It makes sense only if it's shared.
*
* This interface is used by a dynamic proxy. It allows to have a #close
* function in a master client.
*
* This class is intended to be used internally by HBase classes that need to
* speak the MasterMonitorProtocol; but not by final user code. Hence it's
* package protected.
*/
interface MasterMonitorKeepAliveConnection
extends MasterMonitorProtos.MasterMonitorService.BlockingInterface, Closeable {}

View File

@ -64,7 +64,7 @@ public class MasterCoprocessorRpcChannel extends CoprocessorRpcChannel{
.setServiceName(method.getService().getFullName())
.setMethodName(method.getName())
.setRequest(request.toByteString()).build();
CoprocessorServiceResponse result = ProtobufUtil.execService(connection.getMasterAdmin(), call);
CoprocessorServiceResponse result = ProtobufUtil.execService(connection.getMaster(), call);
Message response = null;
if (result.getValue().hasValue()) {
response = responsePrototype.newBuilderForType()

View File

@ -109,9 +109,9 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@ -1327,7 +1327,7 @@ public final class ProtobufUtil {
}
public static CoprocessorServiceResponse execService(
final MasterAdminService.BlockingInterface client, final CoprocessorServiceCall call)
final MasterService.BlockingInterface client, final CoprocessorServiceCall call)
throws IOException {
CoprocessorServiceRequest request = CoprocessorServiceRequest.newBuilder()
.setCall(call).setRegion(

View File

@ -75,28 +75,28 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CompareType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

View File

@ -43,8 +43,8 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.security.access.UserPermission;

View File

@ -17,17 +17,15 @@
*/
package org.apache.hadoop.hbase.security;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.TokenIdentifier.Kind;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
* Maps RPC protocol interfaces to required configuration
*/
@ -40,9 +38,7 @@ public class SecurityInfo {
new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
infos.put(ClientProtos.ClientService.getDescriptor().getName(),
new SecurityInfo("hbase.regionserver.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
infos.put(MasterAdminProtos.MasterAdminService.getDescriptor().getName(),
new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
infos.put(MasterMonitorProtos.MasterMonitorService.getDescriptor().getName(),
infos.put(MasterService.getDescriptor().getName(),
new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(),
new SecurityInfo("hbase.master.kerberos.principal", Kind.HBASE_AUTH_TOKEN));
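The practical effect on lookups is small but worth noting: callers key into this map by the generated service descriptor name, so a single entry now answers for every master RPC. A one-line illustrative sketch (the map population above is from the diff; the lookup line itself is only an example):
// "MasterService" replaces the two removed MasterAdminService / MasterMonitorService keys,
// but still resolves to the master's kerberos principal configuration.
SecurityInfo masterInfo = infos.get(MasterService.getDescriptor().getName());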

View File

@ -22,21 +22,18 @@ import static org.junit.Assert.fail;
import java.io.IOException;
import com.google.protobuf.ByteString;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@ -81,9 +78,9 @@ public class TestSnapshotFromAdmin {
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries);
conf.setLong("hbase.client.pause", pauseTime);
// mock the master admin to our mock
MasterAdminKeepAliveConnection mockMaster = Mockito.mock(MasterAdminKeepAliveConnection.class);
MasterKeepAliveConnection mockMaster = Mockito.mock(MasterKeepAliveConnection.class);
Mockito.when(mockConnection.getConfiguration()).thenReturn(conf);
Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(mockMaster);
Mockito.when(mockConnection.getKeepAliveMasterService()).thenReturn(mockMaster);
// set the max wait time for the snapshot to complete
SnapshotResponse response = SnapshotResponse.newBuilder()
.setExpectedTimeout(maxWaitTime)
@ -140,8 +137,8 @@ public class TestSnapshotFromAdmin {
failSnapshotStart(admin, builder.setName("snapshot").setTable("tab%le").build());
// mock the master connection
MasterAdminKeepAliveConnection master = Mockito.mock(MasterAdminKeepAliveConnection.class);
Mockito.when(mockConnection.getKeepAliveMasterAdminService()).thenReturn(master);
MasterKeepAliveConnection master = Mockito.mock(MasterKeepAliveConnection.class);
Mockito.when(mockConnection.getKeepAliveMasterService()).thenReturn(master);
SnapshotResponse response = SnapshotResponse.newBuilder().setExpectedTimeout(0).build();
Mockito.when(
master.snapshot((RpcController) Mockito.isNull(), Mockito.any(SnapshotRequest.class)))

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import java.util.HashMap;
import com.google.common.collect.Sets;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterManager.ServiceType;
@ -31,8 +32,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
@ -134,17 +134,10 @@ public class DistributedHBaseCluster extends HBaseCluster {
}
@Override
public MasterAdminProtos.MasterAdminService.BlockingInterface getMasterAdmin()
public MasterService.BlockingInterface getMaster()
throws IOException {
HConnection conn = HConnectionManager.getConnection(conf);
return conn.getMasterAdmin();
}
@Override
public MasterMonitorProtos.MasterMonitorService.BlockingInterface getMasterMonitor()
throws IOException {
HConnection conn = HConnectionManager.getConnection(conf);
return conn.getMasterMonitor();
return conn.getMaster();
}
@Override
@ -175,7 +168,7 @@ public class DistributedHBaseCluster extends HBaseCluster {
long start = System.currentTimeMillis();
while (System.currentTimeMillis() - start < timeout) {
try {
getMasterAdmin();
getMaster();
return true;
} catch (MasterNotRunningException m) {
LOG.warn("Master not started yet " + m);

View File

@ -16,7 +16,8 @@
* limitations under the License.
*/
// This file contains protocol buffers that are used for protocols implemented by the master.
// All to do with the Master. Includes schema management since these
// changes are run by the Master process.
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "MasterProtos";
@ -24,6 +25,306 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
import "Client.proto";
import "ClusterStatus.proto";
/* Column-level protobufs */
message AddColumnRequest {
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
message AddColumnResponse {
}
message DeleteColumnRequest {
required TableName table_name = 1;
required bytes column_name = 2;
}
message DeleteColumnResponse {
}
message ModifyColumnRequest {
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
message ModifyColumnResponse {
}
/* Region-level Protos */
message MoveRegionRequest {
required RegionSpecifier region = 1;
optional ServerName dest_server_name = 2;
}
message MoveRegionResponse {
}
/**
* Dispatch merging the specified regions.
*/
message DispatchMergingRegionsRequest {
required RegionSpecifier region_a = 1;
required RegionSpecifier region_b = 2;
optional bool forcible = 3 [default = false];
}
message DispatchMergingRegionsResponse {
}
message AssignRegionRequest {
required RegionSpecifier region = 1;
}
message AssignRegionResponse {
}
message UnassignRegionRequest {
required RegionSpecifier region = 1;
optional bool force = 2 [default = false];
}
message UnassignRegionResponse {
}
message OfflineRegionRequest {
required RegionSpecifier region = 1;
}
message OfflineRegionResponse {
}
/* Table-level protobufs */
message CreateTableRequest {
required TableSchema table_schema = 1;
repeated bytes split_keys = 2;
}
message CreateTableResponse {
}
message DeleteTableRequest {
required TableName table_name = 1;
}
message DeleteTableResponse {
}
message EnableTableRequest {
required TableName table_name = 1;
}
message EnableTableResponse {
}
message DisableTableRequest {
required TableName table_name = 1;
}
message DisableTableResponse {
}
message ModifyTableRequest {
required TableName table_name = 1;
required TableSchema table_schema = 2;
}
message ModifyTableResponse {
}
/* Namespace-level protobufs */
message CreateNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message CreateNamespaceResponse {
}
message DeleteNamespaceRequest {
required string namespaceName = 1;
}
message DeleteNamespaceResponse {
}
message ModifyNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message ModifyNamespaceResponse {
}
message GetNamespaceDescriptorRequest {
required string namespaceName = 1;
}
message GetNamespaceDescriptorResponse {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message ListNamespaceDescriptorsRequest {
}
message ListNamespaceDescriptorsResponse {
repeated NamespaceDescriptor namespaceDescriptor = 1;
}
message ListTableDescriptorsByNamespaceRequest {
required string namespaceName = 1;
}
message ListTableDescriptorsByNamespaceResponse {
repeated TableSchema tableSchema = 1;
}
message ListTableNamesByNamespaceRequest {
required string namespaceName = 1;
}
message ListTableNamesByNamespaceResponse {
repeated TableName tableName = 1;
}
/* Cluster-level protobufs */
message ShutdownRequest {
}
message ShutdownResponse {
}
message StopMasterRequest {
}
message StopMasterResponse {
}
message BalanceRequest {
}
message BalanceResponse {
required bool balancer_ran = 1;
}
message SetBalancerRunningRequest {
required bool on = 1;
optional bool synchronous = 2;
}
message SetBalancerRunningResponse {
optional bool prev_balance_value = 1;
}
message RunCatalogScanRequest {
}
message RunCatalogScanResponse {
optional int32 scan_result = 1;
}
message EnableCatalogJanitorRequest {
required bool enable = 1;
}
message EnableCatalogJanitorResponse {
optional bool prev_value = 1;
}
message IsCatalogJanitorEnabledRequest {
}
message IsCatalogJanitorEnabledResponse {
required bool value = 1;
}
message SnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message SnapshotResponse {
required int64 expected_timeout = 1;
}
message GetCompletedSnapshotsRequest {
}
message GetCompletedSnapshotsResponse {
repeated SnapshotDescription snapshots = 1;
}
message DeleteSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message DeleteSnapshotResponse {
}
message RestoreSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message RestoreSnapshotResponse {
}
/* If you don't send the snapshot, then you will get it back
* in the response (if the snapshot is done) so you can check it
*/
message IsSnapshotDoneRequest {
optional SnapshotDescription snapshot = 1;
}
message IsSnapshotDoneResponse {
optional bool done = 1 [default = false];
optional SnapshotDescription snapshot = 2;
}
message IsRestoreSnapshotDoneRequest {
optional SnapshotDescription snapshot = 1;
}
message IsRestoreSnapshotDoneResponse {
optional bool done = 1 [default = false];
}
message GetSchemaAlterStatusRequest {
required TableName table_name = 1;
}
message GetSchemaAlterStatusResponse {
optional uint32 yet_to_update_regions = 1;
optional uint32 total_regions = 2;
}
message GetTableDescriptorsRequest {
repeated TableName table_names = 1;
}
message GetTableDescriptorsResponse {
repeated TableSchema table_schema = 1;
}
message GetTableNamesRequest {
}
message GetTableNamesResponse {
repeated TableName table_names = 1;
}
message GetClusterStatusRequest {
}
message GetClusterStatusResponse {
required ClusterStatus cluster_status = 1;
}
message IsMasterRunningRequest {
}
@ -32,7 +333,188 @@ message IsMasterRunningResponse {
}
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
returns(GetSchemaAlterStatusResponse);
/** Get list of TableDescriptors for requested tables. */
rpc GetTableDescriptors(GetTableDescriptorsRequest)
returns(GetTableDescriptorsResponse);
/** Get the list of table names. */
rpc GetTableNames(GetTableNamesRequest)
returns(GetTableNamesResponse);
/** Return cluster status. */
rpc GetClusterStatus(GetClusterStatusRequest)
returns(GetClusterStatusResponse);
/** return true if master is available */
rpc IsMasterRunning(IsMasterRunningRequest)
returns(IsMasterRunningResponse);
rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
/** Adds a column to the specified table. */
rpc AddColumn(AddColumnRequest)
returns(AddColumnResponse);
/** Deletes a column from the specified table. Table must be disabled. */
rpc DeleteColumn(DeleteColumnRequest)
returns(DeleteColumnResponse);
/** Modifies an existing column on the specified table. */
rpc ModifyColumn(ModifyColumnRequest)
returns(ModifyColumnResponse);
/** Move the region to the destination server. */
rpc MoveRegion(MoveRegionRequest)
returns(MoveRegionResponse);
/** Master dispatches merging of the regions */
rpc DispatchMergingRegions(DispatchMergingRegionsRequest)
returns(DispatchMergingRegionsResponse);
/** Assign a region to a server chosen at random. */
rpc AssignRegion(AssignRegionRequest)
returns(AssignRegionResponse);
/**
* Unassign a region from current hosting regionserver. Region will then be
* assigned to a regionserver chosen at random. Region could be reassigned
* back to the same server. Use MoveRegion if you want
* to control the region movement.
*/
rpc UnassignRegion(UnassignRegionRequest)
returns(UnassignRegionResponse);
/**
* Offline a region from the assignment manager's in-memory state. The
* region should be in a closed state and there will be no attempt to
* automatically reassign the region as in unassign. This is a special
* method, and should only be used by experts or hbck.
*/
rpc OfflineRegion(OfflineRegionRequest)
returns(OfflineRegionResponse);
/** Deletes a table */
rpc DeleteTable(DeleteTableRequest)
returns(DeleteTableResponse);
/** Puts the table on-line (only needed if table has been previously taken offline) */
rpc EnableTable(EnableTableRequest)
returns(EnableTableResponse);
/** Take table offline */
rpc DisableTable(DisableTableRequest)
returns(DisableTableResponse);
/** Modify a table's metadata */
rpc ModifyTable(ModifyTableRequest)
returns(ModifyTableResponse);
/** Creates a new table asynchronously */
rpc CreateTable(CreateTableRequest)
returns(CreateTableResponse);
/** Shutdown an HBase cluster. */
rpc Shutdown(ShutdownRequest)
returns(ShutdownResponse);
/** Stop HBase Master only. Does not shutdown the cluster. */
rpc StopMaster(StopMasterRequest)
returns(StopMasterResponse);
/**
* Run the balancer. Will run the balancer and if there are regions to move, it will
* go ahead and do the reassignments. Can NOT run for various reasons.
* Check logs.
*/
rpc Balance(BalanceRequest)
returns(BalanceResponse);
/**
* Turn the load balancer on or off.
* If synchronous is true, it waits until the current balance() call, if outstanding, has returned.
*/
rpc SetBalancerRunning(SetBalancerRunningRequest)
returns(SetBalancerRunningResponse);
/** Get a run of the catalog janitor */
rpc RunCatalogScan(RunCatalogScanRequest)
returns(RunCatalogScanResponse);
/**
* Turn the catalog janitor on or off.
*/
rpc EnableCatalogJanitor(EnableCatalogJanitorRequest)
returns(EnableCatalogJanitorResponse);
/**
* Query whether the catalog janitor is enabled.
*/
rpc IsCatalogJanitorEnabled(IsCatalogJanitorEnabledRequest)
returns(IsCatalogJanitorEnabledResponse);
/**
* Call a master coprocessor endpoint
*/
rpc ExecMasterService(CoprocessorServiceRequest)
returns(CoprocessorServiceResponse);
/**
* Create a snapshot for the given table.
*/
rpc Snapshot(SnapshotRequest) returns(SnapshotResponse);
/**
* Get completed snapshots.
* Returns a list of snapshot descriptors for completed snapshots
*/
rpc GetCompletedSnapshots(GetCompletedSnapshotsRequest) returns(GetCompletedSnapshotsResponse);
/**
* Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
*/
rpc DeleteSnapshot(DeleteSnapshotRequest) returns(DeleteSnapshotResponse);
/**
* Determine if the snapshot is done yet.
*/
rpc IsSnapshotDone(IsSnapshotDoneRequest) returns(IsSnapshotDoneResponse);
/**
* Restore a snapshot
*/
rpc RestoreSnapshot(RestoreSnapshotRequest) returns(RestoreSnapshotResponse);
/**
* Determine if the snapshot restore is done yet.
*/
rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
/** Modify a namespace's metadata */
rpc ModifyNamespace(ModifyNamespaceRequest)
returns(ModifyNamespaceResponse);
/** Creates a new namespace synchronously */
rpc CreateNamespace(CreateNamespaceRequest)
returns(CreateNamespaceResponse);
/** Deletes namespace synchronously */
rpc DeleteNamespace(DeleteNamespaceRequest)
returns(DeleteNamespaceResponse);
/** Get a namespace descriptor by name */
rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest)
returns(GetNamespaceDescriptorResponse);
/** Returns a list of namespace descriptors */
rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest)
returns(ListNamespaceDescriptorsResponse);
/** Returns a list of table descriptors for a given namespace */
rpc ListTableDescriptorsByNamespace(ListTableDescriptorsByNamespaceRequest)
returns(ListTableDescriptorsByNamespaceResponse);
/** Returns a list of table names for a given namespace */
rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
returns(ListTableNamesByNamespaceResponse);
}
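Since java_generic_services is on, protoc generates a single MasterService.BlockingInterface covering every RPC above. A short illustrative sketch of a caller (conn is a hypothetical HConnection; each call mirrors one that appears elsewhere in this diff):
MasterService.BlockingInterface stub = conn.getMaster();
// Monitor-style reads and admin-style mutations now share the same stub.
GetClusterStatusResponse status =
  stub.getClusterStatus(null, GetClusterStatusRequest.newBuilder().build());
GetTableNamesResponse names =
  stub.getTableNames(null, GetTableNamesRequest.newBuilder().build());
stub.balance(null, BalanceRequest.newBuilder().build());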

View File

@ -1,467 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file contains protocol buffers that are used for MasterAdminProtocol.
import "Master.proto";
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "MasterAdminProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
import "Client.proto";
/* Column-level protobufs */
message AddColumnRequest {
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
message AddColumnResponse {
}
message DeleteColumnRequest {
required TableName table_name = 1;
required bytes column_name = 2;
}
message DeleteColumnResponse {
}
message ModifyColumnRequest {
required TableName table_name = 1;
required ColumnFamilySchema column_families = 2;
}
message ModifyColumnResponse {
}
/* Region-level Protos */
message MoveRegionRequest {
required RegionSpecifier region = 1;
optional ServerName dest_server_name = 2;
}
message MoveRegionResponse {
}
/**
* Dispatch merging the specified regions.
*/
message DispatchMergingRegionsRequest {
required RegionSpecifier region_a = 1;
required RegionSpecifier region_b = 2;
optional bool forcible = 3 [default = false];
}
message DispatchMergingRegionsResponse {
}
message AssignRegionRequest {
required RegionSpecifier region = 1;
}
message AssignRegionResponse {
}
message UnassignRegionRequest {
required RegionSpecifier region = 1;
optional bool force = 2 [default = false];
}
message UnassignRegionResponse {
}
message OfflineRegionRequest {
required RegionSpecifier region = 1;
}
message OfflineRegionResponse {
}
/* Table-level protobufs */
message CreateTableRequest {
required TableSchema table_schema = 1;
repeated bytes split_keys = 2;
}
message CreateTableResponse {
}
message DeleteTableRequest {
required TableName table_name = 1;
}
message DeleteTableResponse {
}
message EnableTableRequest {
required TableName table_name = 1;
}
message EnableTableResponse {
}
message DisableTableRequest {
required TableName table_name = 1;
}
message DisableTableResponse {
}
message ModifyTableRequest {
required TableName table_name = 1;
required TableSchema table_schema = 2;
}
message ModifyTableResponse {
}
/* Namespace-level protobufs */
message CreateNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message CreateNamespaceResponse {
}
message DeleteNamespaceRequest {
required string namespaceName = 1;
}
message DeleteNamespaceResponse {
}
message ModifyNamespaceRequest {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message ModifyNamespaceResponse {
}
message GetNamespaceDescriptorRequest {
required string namespaceName = 1;
}
message GetNamespaceDescriptorResponse {
required NamespaceDescriptor namespaceDescriptor = 1;
}
message ListNamespaceDescriptorsRequest {
}
message ListNamespaceDescriptorsResponse {
repeated NamespaceDescriptor namespaceDescriptor = 1;
}
message ListTableDescriptorsByNamespaceRequest {
required string namespaceName = 1;
}
message ListTableDescriptorsByNamespaceResponse {
repeated TableSchema tableSchema = 1;
}
message ListTableNamesByNamespaceRequest {
required string namespaceName = 1;
}
message ListTableNamesByNamespaceResponse {
repeated TableName tableName = 1;
}
/* Cluster-level protobufs */
message ShutdownRequest {
}
message ShutdownResponse {
}
message StopMasterRequest {
}
message StopMasterResponse {
}
message BalanceRequest {
}
message BalanceResponse {
required bool balancer_ran = 1;
}
message SetBalancerRunningRequest {
required bool on = 1;
optional bool synchronous = 2;
}
message SetBalancerRunningResponse {
optional bool prev_balance_value = 1;
}
message RunCatalogScanRequest {
}
message RunCatalogScanResponse {
optional int32 scan_result = 1;
}
message EnableCatalogJanitorRequest {
required bool enable = 1;
}
message EnableCatalogJanitorResponse {
optional bool prev_value = 1;
}
message IsCatalogJanitorEnabledRequest {
}
message IsCatalogJanitorEnabledResponse {
required bool value = 1;
}
message SnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message SnapshotResponse {
required int64 expected_timeout = 1;
}
message GetCompletedSnapshotsRequest {
}
message GetCompletedSnapshotsResponse {
repeated SnapshotDescription snapshots = 1;
}
message DeleteSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message DeleteSnapshotResponse {
}
message RestoreSnapshotRequest {
required SnapshotDescription snapshot = 1;
}
message RestoreSnapshotResponse {
}
/* if you don't send the snapshot, then you will get it back
* in the response (if the snapshot is done) so you can check the snapshot
*/
message IsSnapshotDoneRequest {
optional SnapshotDescription snapshot = 1;
}
message IsSnapshotDoneResponse {
optional bool done = 1 [default = false];
optional SnapshotDescription snapshot = 2;
}
message IsRestoreSnapshotDoneRequest {
optional SnapshotDescription snapshot = 1;
}
message IsRestoreSnapshotDoneResponse {
optional bool done = 1 [default = false];
}
service MasterAdminService {
/** Adds a column to the specified table. */
rpc AddColumn(AddColumnRequest)
returns(AddColumnResponse);
/** Deletes a column from the specified table. Table must be disabled. */
rpc DeleteColumn(DeleteColumnRequest)
returns(DeleteColumnResponse);
/** Modifies an existing column on the specified table. */
rpc ModifyColumn(ModifyColumnRequest)
returns(ModifyColumnResponse);
/** Move the region region to the destination server. */
rpc MoveRegion(MoveRegionRequest)
returns(MoveRegionResponse);
/** Master dispatch merging the regions */
rpc DispatchMergingRegions(DispatchMergingRegionsRequest)
returns(DispatchMergingRegionsResponse);
/** Assign a region to a server chosen at random. */
rpc AssignRegion(AssignRegionRequest)
returns(AssignRegionResponse);
/**
* Unassign a region from current hosting regionserver. Region will then be
* assigned to a regionserver chosen at random. Region could be reassigned
* back to the same server. Use MoveRegion if you want
* to control the region movement.
*/
rpc UnassignRegion(UnassignRegionRequest)
returns(UnassignRegionResponse);
/**
* Offline a region from the assignment manager's in-memory state. The
* region should be in a closed state and there will be no attempt to
* automatically reassign the region as in unassign. This is a special
* method, and should only be used by experts or hbck.
*/
rpc OfflineRegion(OfflineRegionRequest)
returns(OfflineRegionResponse);
/** Deletes a table */
rpc DeleteTable(DeleteTableRequest)
returns(DeleteTableResponse);
/** Puts the table on-line (only needed if table has been previously taken offline) */
rpc EnableTable(EnableTableRequest)
returns(EnableTableResponse);
/** Take table offline */
rpc DisableTable(DisableTableRequest)
returns(DisableTableResponse);
/** Modify a table's metadata */
rpc ModifyTable(ModifyTableRequest)
returns(ModifyTableResponse);
/** Creates a new table asynchronously */
rpc CreateTable(CreateTableRequest)
returns(CreateTableResponse);
/** Shutdown an HBase cluster. */
rpc Shutdown(ShutdownRequest)
returns(ShutdownResponse);
/** Stop HBase Master only. Does not shutdown the cluster. */
rpc StopMaster(StopMasterRequest)
returns(StopMasterResponse);
/**
* Run the balancer. Will run the balancer and if regions to move, it will
* go ahead and do the reassignments. Can NOT run for various reasons.
* Check logs.
*/
rpc Balance(BalanceRequest)
returns(BalanceResponse);
/**
* Turn the load balancer on or off.
* If synchronous is true, it waits until current balance() call, if outstanding, to return.
*/
rpc SetBalancerRunning(SetBalancerRunningRequest)
returns(SetBalancerRunningResponse);
/** Get a run of the catalog janitor */
rpc RunCatalogScan(RunCatalogScanRequest)
returns(RunCatalogScanResponse);
/**
* Enable the catalog janitor on or off.
*/
rpc EnableCatalogJanitor(EnableCatalogJanitorRequest)
returns(EnableCatalogJanitorResponse);
/**
* Query whether the catalog janitor is enabled.
*/
rpc IsCatalogJanitorEnabled(IsCatalogJanitorEnabledRequest)
returns(IsCatalogJanitorEnabledResponse);
/**
* Call a master coprocessor endpoint
*/
rpc ExecMasterService(CoprocessorServiceRequest)
returns(CoprocessorServiceResponse);
/**
* Create a snapshot for the given table.
*/
rpc Snapshot(SnapshotRequest) returns(SnapshotResponse);
/**
* Get completed snapshots.
* Returns a list of snapshot descriptors for completed snapshots
*/
rpc GetCompletedSnapshots(GetCompletedSnapshotsRequest) returns(GetCompletedSnapshotsResponse);
/**
* Delete an existing snapshot. This method can also be used to clean up an aborted snapshot.
*/
rpc DeleteSnapshot(DeleteSnapshotRequest) returns(DeleteSnapshotResponse);
/**
* Determine if the snapshot is done yet.
*/
rpc IsSnapshotDone(IsSnapshotDoneRequest) returns(IsSnapshotDoneResponse);
/**
* Restore a snapshot
*/
rpc RestoreSnapshot(RestoreSnapshotRequest) returns(RestoreSnapshotResponse);
/**
* Determine if the snapshot restore is done yet.
*/
rpc IsRestoreSnapshotDone(IsRestoreSnapshotDoneRequest) returns(IsRestoreSnapshotDoneResponse);
/** return true if master is available */
rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
/** Modify a namespace's metadata */
rpc ModifyNamespace(ModifyNamespaceRequest)
returns(ModifyNamespaceResponse);
/** Creates a new namespace synchronously */
rpc CreateNamespace(CreateNamespaceRequest)
returns(CreateNamespaceResponse);
/** Deletes namespace synchronously */
rpc DeleteNamespace(DeleteNamespaceRequest)
returns(DeleteNamespaceResponse);
/** Get a namespace descriptor by name */
rpc GetNamespaceDescriptor(GetNamespaceDescriptorRequest)
returns(GetNamespaceDescriptorResponse);
/** returns a list of namespaces */
rpc ListNamespaceDescriptors(ListNamespaceDescriptorsRequest)
returns(ListNamespaceDescriptorsResponse);
/** returns a list of tables for a given namespace*/
rpc ListTableDescriptorsByNamespace(ListTableDescriptorsByNamespaceRequest)
returns(ListTableDescriptorsByNamespaceResponse);
/** returns a list of tables for a given namespace*/
rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
returns(ListTableNamesByNamespaceResponse);
}

View File

@ -1,81 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file contains protocol buffers that are used for MasterMonitorProtocol.
import "Master.proto";
option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "MasterMonitorProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
import "ClusterStatus.proto";
message GetSchemaAlterStatusRequest {
required TableName table_name = 1;
}
message GetSchemaAlterStatusResponse {
optional uint32 yet_to_update_regions = 1;
optional uint32 total_regions = 2;
}
message GetTableDescriptorsRequest {
repeated TableName table_names = 1;
}
message GetTableDescriptorsResponse {
repeated TableSchema table_schema = 1;
}
message GetTableNamesRequest {
}
message GetTableNamesResponse {
repeated TableName table_names = 1;
}
message GetClusterStatusRequest {
}
message GetClusterStatusResponse {
required ClusterStatus cluster_status = 1;
}
service MasterMonitorService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
returns(GetSchemaAlterStatusResponse);
/** Get list of TableDescriptors for requested tables. */
rpc GetTableDescriptors(GetTableDescriptorsRequest)
returns(GetTableDescriptorsResponse);
/** Get the list of table names. */
rpc GetTableNames(GetTableNamesRequest)
returns(GetTableNamesResponse);
/** Return cluster status. */
rpc GetClusterStatus(GetClusterStatusRequest)
returns(GetClusterStatusResponse);
/** return true if master is available */
rpc IsMasterRunning(IsMasterRunningRequest) returns(IsMasterRunningResponse);
}

View File

@ -34,8 +34,6 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.MasterAdminKeepAliveConnection;
import org.apache.hadoop.hbase.client.MasterMonitorKeepAliveConnection;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
@ -43,7 +41,6 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService.BlockingInterface;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@ -283,14 +280,9 @@ public class CoprocessorHConnection implements HConnection {
return delegate.locateRegions(tableName, useCache, offlined);
}
public BlockingInterface getMasterAdmin() throws IOException {
return delegate.getMasterAdmin();
}
public
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService.BlockingInterface
getMasterMonitor() throws IOException {
return delegate.getMasterMonitor();
public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService.BlockingInterface getMaster()
throws IOException {
return delegate.getMaster();
}
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface
@ -380,14 +372,9 @@ public class CoprocessorHConnection implements HConnection {
delegate.deleteCachedRegionLocation(location);
}
public MasterMonitorKeepAliveConnection getKeepAliveMasterMonitorService()
public MasterKeepAliveConnection getKeepAliveMasterService()
throws MasterNotRunningException {
return delegate.getKeepAliveMasterMonitorService();
}
public MasterAdminKeepAliveConnection getKeepAliveMasterAdminService()
throws MasterNotRunningException {
return delegate.getKeepAliveMasterAdminService();
return delegate.getKeepAliveMasterService();
}
public boolean isDeadServer(ServerName serverName) {

View File

@ -41,7 +41,6 @@ import java.util.concurrent.atomic.AtomicReference;
import javax.management.ObjectName;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@ -77,14 +76,15 @@ import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
import org.apache.hadoop.hbase.ipc.RequestContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RequestContext;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.ipc.ServerRpcController;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
@ -114,70 +114,83 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DispatchMergingRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
@ -218,11 +231,9 @@ import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.net.DNS;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcCallback;
@ -247,9 +258,7 @@ import com.google.protobuf.ServiceException;
*/
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class HMaster extends HasThread
implements MasterMonitorProtos.MasterMonitorService.BlockingInterface,
MasterAdminProtos.MasterAdminService.BlockingInterface,
public class HMaster extends HasThread implements MasterProtos.MasterService.BlockingInterface,
RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
MasterServices, Server {
private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
@ -505,11 +514,8 @@ MasterServices, Server {
private List<BlockingServiceAndInterface> getServices() {
List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(3);
bssi.add(new BlockingServiceAndInterface(
MasterMonitorProtos.MasterMonitorService.newReflectiveBlockingService(this),
MasterMonitorProtos.MasterMonitorService.BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(
MasterAdminProtos.MasterAdminService.newReflectiveBlockingService(this),
MasterAdminProtos.MasterAdminService.BlockingInterface.class));
MasterProtos.MasterService.newReflectiveBlockingService(this),
MasterProtos.MasterService.BlockingInterface.class));
bssi.add(new BlockingServiceAndInterface(
RegionServerStatusProtos.RegionServerStatusService.newReflectiveBlockingService(this),
RegionServerStatusProtos.RegionServerStatusService.BlockingInterface.class));
@ -2946,44 +2952,44 @@ MasterServices, Server {
}
@Override
public MasterAdminProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller,
MasterAdminProtos.ModifyNamespaceRequest request) throws ServiceException {
public ModifyNamespaceResponse modifyNamespace(RpcController controller,
ModifyNamespaceRequest request) throws ServiceException {
try {
modifyNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
return MasterAdminProtos.ModifyNamespaceResponse.getDefaultInstance();
return ModifyNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.CreateNamespaceResponse createNamespace(RpcController controller,
MasterAdminProtos.CreateNamespaceRequest request) throws ServiceException {
public CreateNamespaceResponse createNamespace(RpcController controller,
CreateNamespaceRequest request) throws ServiceException {
try {
createNamespace(ProtobufUtil.toNamespaceDescriptor(request.getNamespaceDescriptor()));
return MasterAdminProtos.CreateNamespaceResponse.getDefaultInstance();
return CreateNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller,
MasterAdminProtos.DeleteNamespaceRequest request) throws ServiceException {
public DeleteNamespaceResponse deleteNamespace(RpcController controller,
DeleteNamespaceRequest request) throws ServiceException {
try {
deleteNamespace(request.getNamespaceName());
return MasterAdminProtos.DeleteNamespaceResponse.getDefaultInstance();
return DeleteNamespaceResponse.getDefaultInstance();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public MasterAdminProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor(
RpcController controller, MasterAdminProtos.GetNamespaceDescriptorRequest request)
public GetNamespaceDescriptorResponse getNamespaceDescriptor(
RpcController controller, GetNamespaceDescriptorRequest request)
throws ServiceException {
try {
return MasterAdminProtos.GetNamespaceDescriptorResponse.newBuilder()
return GetNamespaceDescriptorResponse.newBuilder()
.setNamespaceDescriptor(
ProtobufUtil.toProtoNamespaceDescriptor(getNamespaceDescriptor(request.getNamespaceName())))
.build();
@ -2993,12 +2999,12 @@ MasterServices, Server {
}
@Override
public MasterAdminProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors(
RpcController controller, MasterAdminProtos.ListNamespaceDescriptorsRequest request)
public ListNamespaceDescriptorsResponse listNamespaceDescriptors(
RpcController controller, ListNamespaceDescriptorsRequest request)
throws ServiceException {
try {
MasterAdminProtos.ListNamespaceDescriptorsResponse.Builder response =
MasterAdminProtos.ListNamespaceDescriptorsResponse.newBuilder();
ListNamespaceDescriptorsResponse.Builder response =
ListNamespaceDescriptorsResponse.newBuilder();
for(NamespaceDescriptor ns: listNamespaceDescriptors()) {
response.addNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(ns));
}
@ -3009,12 +3015,12 @@ MasterServices, Server {
}
@Override
public MasterAdminProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
RpcController controller, MasterAdminProtos.ListTableDescriptorsByNamespaceRequest request)
public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
RpcController controller, ListTableDescriptorsByNamespaceRequest request)
throws ServiceException {
try {
MasterAdminProtos.ListTableDescriptorsByNamespaceResponse.Builder b =
MasterAdminProtos.ListTableDescriptorsByNamespaceResponse.newBuilder();
ListTableDescriptorsByNamespaceResponse.Builder b =
ListTableDescriptorsByNamespaceResponse.newBuilder();
for(HTableDescriptor htd: listTableDescriptorsByNamespace(request.getNamespaceName())) {
b.addTableSchema(htd.convert());
}
@ -3025,12 +3031,12 @@ MasterServices, Server {
}
@Override
public MasterAdminProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace(
RpcController controller, MasterAdminProtos.ListTableNamesByNamespaceRequest request)
public ListTableNamesByNamespaceResponse listTableNamesByNamespace(
RpcController controller, ListTableNamesByNamespaceRequest request)
throws ServiceException {
try {
MasterAdminProtos.ListTableNamesByNamespaceResponse.Builder b =
MasterAdminProtos.ListTableNamesByNamespaceResponse.newBuilder();
ListTableNamesByNamespaceResponse.Builder b =
ListTableNamesByNamespaceResponse.newBuilder();
for (TableName tableName: listTableNamesByNamespace(request.getNamespaceName())) {
b.addTableName(ProtobufUtil.toProtoTableName(tableName));
}


@ -20,8 +20,7 @@ package org.apache.hadoop.hbase.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
@ -36,8 +35,7 @@ public class HBasePolicyProvider extends PolicyProvider {
protected final static Service[] services = {
new Service("security.client.protocol.acl", ClientService.BlockingInterface.class),
new Service("security.client.protocol.acl", AdminService.BlockingInterface.class),
new Service("security.admin.protocol.acl", MasterMonitorService.BlockingInterface.class),
new Service("security.admin.protocol.acl", MasterAdminService.BlockingInterface.class),
new Service("security.admin.protocol.acl", MasterService.BlockingInterface.class),
new Service("security.masterregion.protocol.acl", RegionServerStatusService.BlockingInterface.class)
};


@ -27,8 +27,7 @@ import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.util.Threads;
/**
@ -98,13 +97,7 @@ public abstract class HBaseCluster implements Closeable, Configurable {
/**
* Returns an {@link MasterAdminService.BlockingInterface} to the active master
*/
public abstract MasterAdminService.BlockingInterface getMasterAdmin()
throws IOException;
/**
* Returns an {@link MasterMonitorService.BlockingInterface} to the active master
*/
public abstract MasterMonitorService.BlockingInterface getMasterMonitor()
public abstract MasterService.BlockingInterface getMaster()
throws IOException;
/**


@ -33,8 +33,6 @@ import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@ -377,16 +375,6 @@ public class MiniHBaseCluster extends HBaseCluster {
return t;
}
@Override
public MasterAdminService.BlockingInterface getMasterAdmin() {
return this.hbaseCluster.getActiveMaster();
}
@Override
public MasterMonitorService.BlockingInterface getMasterMonitor() {
return this.hbaseCluster.getActiveMaster();
}
/**
* Returns the current active master, if available.
* @return the active HMaster, null if none is active.


@ -31,11 +31,12 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
import org.mortbay.log.Log;
import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
@ -63,12 +64,12 @@ public class TestHBaseAdminNoCluster {
HConnection connection = HConnectionTestingUtility.getMockedConnection(configuration);
// Mock so we get back the master interface. Make it so when createTable is called, we throw
// the PleaseHoldException.
MasterAdminKeepAliveConnection masterAdmin =
Mockito.mock(MasterAdminKeepAliveConnection.class);
MasterKeepAliveConnection masterAdmin =
Mockito.mock(MasterKeepAliveConnection.class);
Mockito.when(masterAdmin.createTable((RpcController)Mockito.any(),
(MasterAdminProtos.CreateTableRequest)Mockito.any())).
thenThrow(new ServiceException("Test fail").initCause(new PleaseHoldException("test")));
Mockito.when(connection.getKeepAliveMasterAdminService()).thenReturn(masterAdmin);
(CreateTableRequest)Mockito.any())).
thenThrow(new ServiceException("Test fail").initCause(new PleaseHoldException("test")));
Mockito.when(connection.getKeepAliveMasterService()).thenReturn(masterAdmin);
// Mock up our admin Interfaces
HBaseAdmin admin = new HBaseAdmin(configuration);
try {
@ -83,7 +84,7 @@ public class TestHBaseAdminNoCluster {
}
// Assert we were called 'count' times.
Mockito.verify(masterAdmin, Mockito.atLeast(count)).createTable((RpcController)Mockito.any(),
(MasterAdminProtos.CreateTableRequest)Mockito.any());
(CreateTableRequest)Mockito.any());
} finally {
admin.close();
if (connection != null)HConnectionManager.deleteConnection(configuration);


@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegionServer;


@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.security.User;
import org.junit.Test;
@ -57,8 +57,8 @@ public class TestHMasterRPCException {
try {
BlockingRpcChannel channel =
rpcClient.createBlockingRpcChannel(sm, User.getCurrent(), 0);
MasterMonitorProtos.MasterMonitorService.BlockingInterface stub =
MasterMonitorProtos.MasterMonitorService.newBlockingStub(channel);
MasterProtos.MasterService.BlockingInterface stub =
MasterProtos.MasterService.newBlockingStub(channel);
stub.isMasterRunning(null, IsMasterRunningRequest.getDefaultInstance());
fail();
} catch (ServiceException ex) {


@ -33,24 +33,23 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;


@ -36,12 +36,12 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@ -51,8 +51,8 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;


@ -95,7 +95,7 @@ public class TestHTraceHooks {
int createTableCount = 0;
for (Span s : spansByParentIdMap.get(createTableRoot.getSpanId())) {
if (s.getDescription().startsWith("MasterAdminService.CreateTable")) {
if (s.getDescription().startsWith("MasterService.CreateTable")) {
createTableCount++;
}
}