diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index cc7a975c1a3..93216dbd8ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -332,6 +332,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
editlog.protofsimage.protoFederationProtocol.proto
+ RouterProtocol.proto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index b1f44a4196e..d51a8e2e3db 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -31,6 +31,7 @@ function hadoop_usage
hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
hadoop_add_option "--workers" "turn on worker mode"
hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility"
hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache"
hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
@@ -42,6 +43,7 @@ function hadoop_usage
hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node"
hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI"
+ hadoop_add_subcommand "federation" admin "manage Router-based federation"
hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode"
hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility"
hadoop_add_subcommand "getconf" client "get config values from configuration"
@@ -181,6 +183,9 @@ function hdfscmd_case
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router'
;;
+ federation)
+ HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin'
+ ;;
secondarynamenode)
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index b9853d622be..53bdf70edac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
)
)
- set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug
+ set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
for %%i in ( %hdfscommands% ) do (
if %hdfs-command% == %%i set hdfscommand=true
)
@@ -184,6 +184,11 @@ goto :eof
set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
goto :eof
+:federation
+ set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
+ set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
+ goto :eof
+
:debug
set CLASS=org.apache.hadoop.hdfs.tools.DebugAdmin
goto :eof
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9b77a908dba..629ad00fa62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1196,6 +1196,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String FEDERATION_STORE_PREFIX =
FEDERATION_ROUTER_PREFIX + "store.";
+ public static final String DFS_ROUTER_STORE_ENABLE =
+ FEDERATION_STORE_PREFIX + "enable";
+ public static final boolean DFS_ROUTER_STORE_ENABLE_DEFAULT = true;
+
public static final String FEDERATION_STORE_SERIALIZER_CLASS =
DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
public static final Class
@@ -1222,6 +1226,21 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS_DEFAULT =
TimeUnit.MINUTES.toMillis(5);
+ // HDFS Router-based federation admin
+ public static final String DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY =
+ FEDERATION_ROUTER_PREFIX + "admin.handler.count";
+ public static final int DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT = 1;
+ public static final int DFS_ROUTER_ADMIN_PORT_DEFAULT = 8111;
+ public static final String DFS_ROUTER_ADMIN_ADDRESS_KEY =
+ FEDERATION_ROUTER_PREFIX + "admin-address";
+ public static final String DFS_ROUTER_ADMIN_ADDRESS_DEFAULT =
+ "0.0.0.0:" + DFS_ROUTER_ADMIN_PORT_DEFAULT;
+ public static final String DFS_ROUTER_ADMIN_BIND_HOST_KEY =
+ FEDERATION_ROUTER_PREFIX + "admin-bind-host";
+ public static final String DFS_ROUTER_ADMIN_ENABLE =
+ FEDERATION_ROUTER_PREFIX + "admin.enable";
+ public static final boolean DFS_ROUTER_ADMIN_ENABLE_DEFAULT = true;
+
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
@Deprecated
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
new file mode 100644
index 00000000000..96fa794183c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolPB.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.token.TokenInfo;
+
+/**
+ * Protocol that admin clients use to communicate with the Router.
+ * Note: This extends the protocolbuffer service based interface to
+ * add annotations required for security.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+@KerberosInfo(
+ serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
+@TokenInfo(DelegationTokenSelector.class)
+@ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
+ protocolVersion = 1)
+public interface RouterAdminProtocolPB extends
+ RouterAdminProtocolService.BlockingInterface {
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
new file mode 100644
index 00000000000..415bbd9b5c0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryResponsePBImpl;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class is used on the server side. Calls come across the wire for the
+ * protocol {@link RouterAdminProtocolPB}. This class translates the PB data
+ * types to the native data types used inside the HDFS Router as specified in
+ * the generic RouterAdminProtocol.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class RouterAdminProtocolServerSideTranslatorPB implements
+ RouterAdminProtocolPB {
+
+ private final RouterAdminServer server;
+
+ /**
+ * Constructor.
+   * @param server The Router admin server.
+ * @throws IOException
+ */
+ public RouterAdminProtocolServerSideTranslatorPB(RouterAdminServer server)
+ throws IOException {
+ this.server = server;
+ }
+
+ @Override
+ public AddMountTableEntryResponseProto addMountTableEntry(
+ RpcController controller, AddMountTableEntryRequestProto request)
+ throws ServiceException {
+
+ try {
+ AddMountTableEntryRequest req =
+ new AddMountTableEntryRequestPBImpl(request);
+ AddMountTableEntryResponse response = server.addMountTableEntry(req);
+ AddMountTableEntryResponsePBImpl responsePB =
+ (AddMountTableEntryResponsePBImpl)response;
+ return responsePB.getProto();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ /**
+ * Remove an entry from the mount table.
+ */
+ @Override
+ public RemoveMountTableEntryResponseProto removeMountTableEntry(
+ RpcController controller, RemoveMountTableEntryRequestProto request)
+ throws ServiceException {
+ try {
+ RemoveMountTableEntryRequest req =
+ new RemoveMountTableEntryRequestPBImpl(request);
+ RemoveMountTableEntryResponse response =
+ server.removeMountTableEntry(req);
+ RemoveMountTableEntryResponsePBImpl responsePB =
+ (RemoveMountTableEntryResponsePBImpl)response;
+ return responsePB.getProto();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ /**
+ * Get matching mount table entries.
+ */
+ @Override
+ public GetMountTableEntriesResponseProto getMountTableEntries(
+ RpcController controller, GetMountTableEntriesRequestProto request)
+ throws ServiceException {
+ try {
+ GetMountTableEntriesRequest req =
+ new GetMountTableEntriesRequestPBImpl(request);
+ GetMountTableEntriesResponse response = server.getMountTableEntries(req);
+ GetMountTableEntriesResponsePBImpl responsePB =
+ (GetMountTableEntriesResponsePBImpl)response;
+ return responsePB.getProto();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ /**
+ * Update a single mount table entry.
+ */
+ @Override
+ public UpdateMountTableEntryResponseProto updateMountTableEntry(
+ RpcController controller, UpdateMountTableEntryRequestProto request)
+ throws ServiceException {
+ try {
+ UpdateMountTableEntryRequest req =
+ new UpdateMountTableEntryRequestPBImpl(request);
+ UpdateMountTableEntryResponse response =
+ server.updateMountTableEntry(req);
+ UpdateMountTableEntryResponsePBImpl responsePB =
+ (UpdateMountTableEntryResponsePBImpl)response;
+ return responsePB.getProto();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
new file mode 100644
index 00000000000..43663ac5502
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryResponsePBImpl;
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.ProtocolTranslator;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcClientUtil;
+
+import com.google.protobuf.ServiceException;
+
+/**
+ * This class forwards RouterAdminProtocol calls as RPC calls to the Router
+ * admin server while translating from the parameter types used in the
+ * MountTableManager interface to the protobuf (PB) types.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class RouterAdminProtocolTranslatorPB
+ implements ProtocolMetaInterface, MountTableManager,
+ Closeable, ProtocolTranslator {
+ final private RouterAdminProtocolPB rpcProxy;
+
+ public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
+ rpcProxy = proxy;
+ }
+
+ @Override
+ public void close() {
+ RPC.stopProxy(rpcProxy);
+ }
+
+ @Override
+ public Object getUnderlyingProxyObject() {
+ return rpcProxy;
+ }
+
+ @Override
+ public boolean isMethodSupported(String methodName) throws IOException {
+ return RpcClientUtil.isMethodSupported(rpcProxy,
+ RouterAdminProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+ RPC.getProtocolVersion(RouterAdminProtocolPB.class), methodName);
+ }
+
+ @Override
+ public AddMountTableEntryResponse addMountTableEntry(
+ AddMountTableEntryRequest request) throws IOException {
+ AddMountTableEntryRequestPBImpl requestPB =
+ (AddMountTableEntryRequestPBImpl)request;
+ AddMountTableEntryRequestProto proto = requestPB.getProto();
+ try {
+ AddMountTableEntryResponseProto response =
+ rpcProxy.addMountTableEntry(null, proto);
+ return new AddMountTableEntryResponsePBImpl(response);
+ } catch (ServiceException e) {
+ throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+ }
+ }
+
+ @Override
+ public UpdateMountTableEntryResponse updateMountTableEntry(
+ UpdateMountTableEntryRequest request) throws IOException {
+ UpdateMountTableEntryRequestPBImpl requestPB =
+ (UpdateMountTableEntryRequestPBImpl)request;
+ UpdateMountTableEntryRequestProto proto = requestPB.getProto();
+ try {
+ UpdateMountTableEntryResponseProto response =
+ rpcProxy.updateMountTableEntry(null, proto);
+ return new UpdateMountTableEntryResponsePBImpl(response);
+ } catch (ServiceException e) {
+ throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+ }
+ }
+
+ @Override
+ public RemoveMountTableEntryResponse removeMountTableEntry(
+ RemoveMountTableEntryRequest request) throws IOException {
+ RemoveMountTableEntryRequestPBImpl requestPB =
+ (RemoveMountTableEntryRequestPBImpl)request;
+ RemoveMountTableEntryRequestProto proto = requestPB.getProto();
+ try {
+ RemoveMountTableEntryResponseProto responseProto =
+ rpcProxy.removeMountTableEntry(null, proto);
+ return new RemoveMountTableEntryResponsePBImpl(responseProto);
+ } catch (ServiceException e) {
+ throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+ }
+ }
+
+ @Override
+ public GetMountTableEntriesResponse getMountTableEntries(
+ GetMountTableEntriesRequest request) throws IOException {
+ GetMountTableEntriesRequestPBImpl requestPB =
+ (GetMountTableEntriesRequestPBImpl)request;
+ GetMountTableEntriesRequestProto proto = requestPB.getProto();
+ try {
+ GetMountTableEntriesResponseProto response =
+ rpcProxy.getMountTableEntries(null, proto);
+ return new GetMountTableEntriesResponsePBImpl(response);
+ } catch (ServiceException e) {
+ throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
index b0ced24a8ae..d974c78e64c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/MembershipNamenodeResolver.java
@@ -61,7 +61,7 @@ public class MembershipNamenodeResolver
/** Reference to the State Store. */
private final StateStoreService stateStore;
/** Membership State Store interface. */
- private final MembershipStore membershipInterface;
+ private MembershipStore membershipInterface;
/** Parent router ID. */
private String routerId;
@@ -82,25 +82,27 @@ public class MembershipNamenodeResolver
if (this.stateStore != null) {
// Request cache updates from the state store
this.stateStore.registerCacheExternal(this);
+ }
+ }
- // Initialize the interface to get the membership
+ private synchronized MembershipStore getMembershipStore() throws IOException {
+ if (this.membershipInterface == null) {
this.membershipInterface = this.stateStore.getRegisteredRecordStore(
MembershipStore.class);
- } else {
- this.membershipInterface = null;
- }
-
- if (this.membershipInterface == null) {
- throw new IOException("State Store does not have an interface for " +
- MembershipStore.class.getSimpleName());
+ if (this.membershipInterface == null) {
+ throw new IOException("State Store does not have an interface for " +
+ MembershipStore.class.getSimpleName());
+ }
}
+ return this.membershipInterface;
}
@Override
public boolean loadCache(boolean force) {
// Our cache depends on the store, update it first
try {
- this.membershipInterface.loadCache(force);
+ MembershipStore membership = getMembershipStore();
+ membership.loadCache(force);
} catch (IOException e) {
LOG.error("Cannot update membership from the State Store", e);
}
@@ -126,8 +128,9 @@ public class MembershipNamenodeResolver
GetNamenodeRegistrationsRequest request =
GetNamenodeRegistrationsRequest.newInstance(partial);
+ MembershipStore membership = getMembershipStore();
GetNamenodeRegistrationsResponse response =
- this.membershipInterface.getNamenodeRegistrations(request);
+ membership.getNamenodeRegistrations(request);
List records = response.getNamenodeMemberships();
if (records != null && records.size() == 1) {
@@ -135,7 +138,7 @@ public class MembershipNamenodeResolver
UpdateNamenodeRegistrationRequest updateRequest =
UpdateNamenodeRegistrationRequest.newInstance(
record.getNameserviceId(), record.getNamenodeId(), ACTIVE);
- this.membershipInterface.updateNamenodeRegistration(updateRequest);
+ membership.updateNamenodeRegistration(updateRequest);
}
} catch (StateStoreUnavailableException e) {
LOG.error("Cannot update {} as active, State Store unavailable", address);
@@ -226,14 +229,14 @@ public class MembershipNamenodeResolver
NamenodeHeartbeatRequest request = NamenodeHeartbeatRequest.newInstance();
request.setNamenodeMembership(record);
- return this.membershipInterface.namenodeHeartbeat(request).getResult();
+ return getMembershipStore().namenodeHeartbeat(request).getResult();
}
@Override
public Set getNamespaces() throws IOException {
GetNamespaceInfoRequest request = GetNamespaceInfoRequest.newInstance();
GetNamespaceInfoResponse response =
- this.membershipInterface.getNamespaceInfo(request);
+ getMembershipStore().getNamespaceInfo(request);
return response.getNamespaceInfo();
}
@@ -259,8 +262,9 @@ public class MembershipNamenodeResolver
// Retrieve a list of all registrations that match this query.
// This may include all NN records for a namespace/blockpool, including
// duplicate records for the same NN from different routers.
+ MembershipStore membershipStore = getMembershipStore();
GetNamenodeRegistrationsResponse response =
- this.membershipInterface.getNamenodeRegistrations(request);
+ membershipStore.getNamenodeRegistrations(request);
List memberships = response.getNamenodeMemberships();
if (!addExpired || !addUnavailable) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
index 213a58fc110..fcbd2eb3eba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/Router.java
@@ -81,6 +81,10 @@ public class Router extends CompositeService {
private RouterRpcServer rpcServer;
private InetSocketAddress rpcAddress;
+ /** RPC interface for the admin. */
+ private RouterAdminServer adminServer;
+ private InetSocketAddress adminAddress;
+
/** Interface with the State Store. */
private StateStoreService stateStore;
@@ -116,6 +120,14 @@ public class Router extends CompositeService {
protected void serviceInit(Configuration configuration) throws Exception {
this.conf = configuration;
+ if (conf.getBoolean(
+ DFSConfigKeys.DFS_ROUTER_STORE_ENABLE,
+ DFSConfigKeys.DFS_ROUTER_STORE_ENABLE_DEFAULT)) {
+ // Service that maintains the State Store connection
+ this.stateStore = new StateStoreService();
+ addService(this.stateStore);
+ }
+
// Resolver to track active NNs
this.namenodeResolver = newActiveNamenodeResolver(
this.conf, this.stateStore);
@@ -138,6 +150,14 @@ public class Router extends CompositeService {
this.setRpcServerAddress(rpcServer.getRpcAddress());
}
+ if (conf.getBoolean(
+ DFSConfigKeys.DFS_ROUTER_ADMIN_ENABLE,
+ DFSConfigKeys.DFS_ROUTER_ADMIN_ENABLE_DEFAULT)) {
+ // Create admin server
+ this.adminServer = createAdminServer();
+ addService(this.adminServer);
+ }
+
if (conf.getBoolean(
DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE,
DFSConfigKeys.DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT)) {
@@ -263,6 +283,38 @@ public class Router extends CompositeService {
return this.rpcAddress;
}
+ /////////////////////////////////////////////////////////
+ // Admin server
+ /////////////////////////////////////////////////////////
+
+ /**
+ * Create a new router admin server to handle the router admin interface.
+ *
+ * @return RouterAdminServer
+ * @throws IOException If the admin server was not successfully started.
+ */
+ protected RouterAdminServer createAdminServer() throws IOException {
+ return new RouterAdminServer(this.conf, this);
+ }
+
+ /**
+ * Set the current Admin socket for the router.
+ *
+   * @param address Admin RPC address.
+ */
+ protected void setAdminServerAddress(InetSocketAddress address) {
+ this.adminAddress = address;
+ }
+
+ /**
+ * Get the current Admin socket address for the router.
+ *
+ * @return InetSocketAddress Admin address.
+ */
+ public InetSocketAddress getAdminServerAddress() {
+ return adminAddress;
+ }
+
/////////////////////////////////////////////////////////
// Namenode heartbeat monitors
/////////////////////////////////////////////////////////
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
new file mode 100644
index 00000000000..7687216a98a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.proto.RouterProtocolProtos.RouterAdminProtocolService;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolServerSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.store.MountTableStore;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RPC.Server;
+import org.apache.hadoop.service.AbstractService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.BlockingService;
+
+/**
+ * This class is responsible for handling all of the Admin calls to the HDFS
+ * router. It is created, started, and stopped by {@link Router}.
+ */
+public class RouterAdminServer extends AbstractService
+ implements MountTableManager {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(RouterAdminServer.class);
+
+ private Configuration conf;
+
+ private final Router router;
+
+ private MountTableStore mountTableStore;
+
+ /** The Admin server that listens to requests from clients. */
+ private final Server adminServer;
+ private final InetSocketAddress adminAddress;
+
+ public RouterAdminServer(Configuration conf, Router router)
+ throws IOException {
+ super(RouterAdminServer.class.getName());
+
+ this.conf = conf;
+ this.router = router;
+
+ int handlerCount = this.conf.getInt(
+ DFSConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_KEY,
+ DFSConfigKeys.DFS_ROUTER_ADMIN_HANDLER_COUNT_DEFAULT);
+
+ RPC.setProtocolEngine(this.conf, RouterAdminProtocolPB.class,
+ ProtobufRpcEngine.class);
+
+ RouterAdminProtocolServerSideTranslatorPB routerAdminProtocolTranslator =
+ new RouterAdminProtocolServerSideTranslatorPB(this);
+ BlockingService clientNNPbService = RouterAdminProtocolService.
+ newReflectiveBlockingService(routerAdminProtocolTranslator);
+
+ InetSocketAddress confRpcAddress = conf.getSocketAddr(
+ DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
+ DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_KEY,
+ DFSConfigKeys.DFS_ROUTER_ADMIN_ADDRESS_DEFAULT,
+ DFSConfigKeys.DFS_ROUTER_ADMIN_PORT_DEFAULT);
+
+ String bindHost = conf.get(
+ DFSConfigKeys.DFS_ROUTER_ADMIN_BIND_HOST_KEY,
+ confRpcAddress.getHostName());
+ LOG.info("Admin server binding to {}:{}",
+ bindHost, confRpcAddress.getPort());
+
+ this.adminServer = new RPC.Builder(this.conf)
+ .setProtocol(RouterAdminProtocolPB.class)
+ .setInstance(clientNNPbService)
+ .setBindAddress(bindHost)
+ .setPort(confRpcAddress.getPort())
+ .setNumHandlers(handlerCount)
+ .setVerbose(false)
+ .build();
+
+ // The RPC-server port can be ephemeral... ensure we have the correct info
+ InetSocketAddress listenAddress = this.adminServer.getListenerAddress();
+ this.adminAddress = new InetSocketAddress(
+ confRpcAddress.getHostName(), listenAddress.getPort());
+ router.setAdminServerAddress(this.adminAddress);
+ }
+
+ /** Allow access to the client RPC server for testing. */
+ @VisibleForTesting
+ Server getAdminServer() {
+ return this.adminServer;
+ }
+
+ private MountTableStore getMountTableStore() throws IOException {
+ if (this.mountTableStore == null) {
+ this.mountTableStore = router.getStateStore().getRegisteredRecordStore(
+ MountTableStore.class);
+ if (this.mountTableStore == null) {
+ throw new IOException("Mount table state store is not available.");
+ }
+ }
+ return this.mountTableStore;
+ }
+
+ /**
+ * Get the RPC address of the admin service.
+ * @return Administration service RPC address.
+ */
+ public InetSocketAddress getRpcAddress() {
+ return this.adminAddress;
+ }
+
+ @Override
+ protected void serviceInit(Configuration configuration) throws Exception {
+ this.conf = configuration;
+ super.serviceInit(conf);
+ }
+
+ @Override
+ protected void serviceStart() throws Exception {
+ this.adminServer.start();
+ super.serviceStart();
+ }
+
+ @Override
+ protected void serviceStop() throws Exception {
+ if (this.adminServer != null) {
+ this.adminServer.stop();
+ }
+ super.serviceStop();
+ }
+
+ @Override
+ public AddMountTableEntryResponse addMountTableEntry(
+ AddMountTableEntryRequest request) throws IOException {
+ return getMountTableStore().addMountTableEntry(request);
+ }
+
+ @Override
+ public UpdateMountTableEntryResponse updateMountTableEntry(
+ UpdateMountTableEntryRequest request) throws IOException {
+ return getMountTableStore().updateMountTableEntry(request);
+ }
+
+ @Override
+ public RemoveMountTableEntryResponse removeMountTableEntry(
+ RemoveMountTableEntryRequest request) throws IOException {
+ return getMountTableStore().removeMountTableEntry(request);
+ }
+
+ @Override
+ public GetMountTableEntriesResponse getMountTableEntries(
+ GetMountTableEntriesRequest request) throws IOException {
+ return getMountTableStore().getMountTableEntries(request);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
new file mode 100644
index 00000000000..1f76b980108
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClient.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.RouterAdminProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * Client to connect to the {@link Router} via the admin protocol.
+ */
+@Private
+public class RouterClient implements Closeable {
+
+ private final RouterAdminProtocolTranslatorPB proxy;
+ private final UserGroupInformation ugi;
+
+ private static RouterAdminProtocolTranslatorPB createRouterProxy(
+ InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
+ throws IOException {
+
+ RPC.setProtocolEngine(
+ conf, RouterAdminProtocolPB.class, ProtobufRpcEngine.class);
+
+ AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);
+ final long version = RPC.getProtocolVersion(RouterAdminProtocolPB.class);
+ RouterAdminProtocolPB proxy = RPC.getProtocolProxy(
+ RouterAdminProtocolPB.class, version, address, ugi, conf,
+ NetUtils.getDefaultSocketFactory(conf),
+ RPC.getRpcTimeout(conf), null,
+ fallbackToSimpleAuth).getProxy();
+
+ return new RouterAdminProtocolTranslatorPB(proxy);
+ }
+
+ public RouterClient(InetSocketAddress address, Configuration conf)
+ throws IOException {
+ this.ugi = UserGroupInformation.getCurrentUser();
+ this.proxy = createRouterProxy(address, conf, ugi);
+ }
+
+ public MountTableManager getMountTableManager() {
+ return proxy;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ RPC.stopProxy(proxy);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
new file mode 100644
index 00000000000..07864197f61
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -0,0 +1,341 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.federation;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
+import org.apache.hadoop.hdfs.server.federation.router.RouterClient;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class provides some Federation administrative access shell commands.
+ */
+@Private
+public class RouterAdmin extends Configured implements Tool {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RouterAdmin.class);
+
+ private RouterClient client;
+
+ public static void main(String[] argv) throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ RouterAdmin admin = new RouterAdmin(conf);
+
+ int res = ToolRunner.run(admin, argv);
+ System.exit(res);
+ }
+
+ public RouterAdmin(Configuration conf) {
+ super(conf);
+ }
+
+ /**
+ * Print the usage message.
+ */
+ public void printUsage() {
+ String usage = "Federation Admin Tools:\n"
+ + "\t[-add