HDFS-12577. Rename Router tooling. Contributed by Inigo Goiri.

(cherry picked from commit 53e8d0d030525e4c7f3875e23807c6dbe778890f)
(cherry picked from commit 5d63a388d1c3ec8a658cb2fd9b34c240bddf15a0)
Inigo Goiri 2017-10-06 17:31:53 -07:00, committed by vrushali
parent 1772d4563d
commit c954e6b7b2
4 changed files with 88 additions and 50 deletions

View File

@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
   )
 )
-set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router federation debug
+set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto dfsrouter dfsrouteradmin debug
 for %%i in ( %hdfscommands% ) do (
   if %hdfs-command% == %%i set hdfscommand=true
 )
@@ -179,12 +179,12 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin
   goto :eof
-:router
-  set CLASS=org.apache.hadoop.hdfs.server.federation.router.Router
+:dfsrouter
+  set CLASS=org.apache.hadoop.hdfs.server.federation.router.DFSRouter
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
-:federation
+:dfsrouteradmin
   set CLASS=org.apache.hadoop.hdfs.tools.federation.RouterAdmin
   set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_ROUTER_OPTS%
   goto :eof
@@ -229,7 +229,8 @@ goto :eof
 @echo secondarynamenode run the DFS secondary namenode
 @echo namenode run the DFS namenode
 @echo journalnode run the DFS journalnode
-@echo router run the DFS router
+@echo dfsrouter run the DFS router
+@echo dfsrouteradmin manage Router-based federation
 @echo zkfc run the ZK Failover Controller daemon
 @echo datanode run a DFS datanode
 @echo dfsadmin run a DFS admin client
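
With this change the launcher script dispatches two renamed subcommands instead of `router` and `federation`. A quick smoke test (a sketch, assuming a configured client environment with the hdfs script on the PATH):

    hdfs dfsrouter
    hdfs dfsrouteradmin -ls

The first starts the Router in the foreground through the new DFSRouter entry point; the second invokes RouterAdmin to list the mount table entries.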

View File

@@ -0,0 +1,76 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.federation.router;

import static org.apache.hadoop.util.ExitUtil.terminate;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.service.CompositeService.CompositeServiceShutdownHook;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Tool to start the {@link Router} for Router-based federation.
 */
public final class DFSRouter {

  private static final Logger LOG = LoggerFactory.getLogger(DFSRouter.class);

  /** Usage string for help message. */
  private static final String USAGE = "Usage: hdfs dfsrouter";

  /** Priority of the Router shutdown hook. */
  public static final int SHUTDOWN_HOOK_PRIORITY = 30;

  private DFSRouter() {
    // This is just a class to trigger the Router
  }

  /**
   * Main run loop for the router.
   *
   * @param argv parameters.
   */
  public static void main(String[] argv) {
    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
      System.exit(0);
    }

    try {
      StringUtils.startupShutdownMessage(Router.class, argv, LOG);

      Router router = new Router();
      ShutdownHookManager.get().addShutdownHook(
          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);

      Configuration conf = new HdfsConfiguration();
      router.init(conf);
      router.start();
    } catch (Throwable e) {
      LOG.error("Failed to start router", e);
      terminate(1, e);
    }
  }
}
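
The new class only carries the command-line entry point; the Router service itself is unchanged. A rough sketch of how it is reached from the renamed subcommand (the help handling comes from DFSUtil.parseHelpArgument shown above; paths and environment are assumptions of a standard install):

    # prints the USAGE string defined above ("Usage: hdfs dfsrouter") and exits
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouter -help

    # runs main(): startup banner, shutdown hook registration, then Router.init()/start()
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouter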

View File

@@ -19,7 +19,6 @@
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newActiveNamenodeResolver;
 import static org.apache.hadoop.hdfs.server.federation.router.FederationUtil.newFileSubclusterResolver;
-import static org.apache.hadoop.util.ExitUtil.terminate;
 import java.io.IOException;
 import java.net.InetAddress;
@@ -35,7 +34,6 @@
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.federation.metrics.FederationMetrics;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
@@ -44,8 +42,6 @@
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,12 +106,6 @@ public class Router extends CompositeService {
   private JvmPauseMonitor pauseMonitor;

-  /** Usage string for help message. */
-  private static final String USAGE = "Usage: java Router";
-
-  /** Priority of the Router shutdown hook. */
-  public static final int SHUTDOWN_HOOK_PRIORITY = 30;
-
   /////////////////////////////////////////////////////////
   // Constructor
@@ -250,35 +240,6 @@ public void run() {
     }.start();
   }

-  /**
-   * Main run loop for the router.
-   *
-   * @param argv parameters.
-   */
-  public static void main(String[] argv) {
-    if (DFSUtil.parseHelpArgument(argv, Router.USAGE, System.out, true)) {
-      System.exit(0);
-    }
-    try {
-      StringUtils.startupShutdownMessage(Router.class, argv, LOG);
-      Router router = new Router();
-      ShutdownHookManager.get().addShutdownHook(
-          new CompositeServiceShutdownHook(router), SHUTDOWN_HOOK_PRIORITY);
-      Configuration conf = new HdfsConfiguration();
-      router.init(conf);
-      router.start();
-    } catch (Throwable e) {
-      LOG.error("Failed to start router", e);
-      terminate(1, e);
-    }
-  }
   /////////////////////////////////////////////////////////
   // RPC Server
   /////////////////////////////////////////////////////////

View File

@@ -164,11 +164,11 @@ The rest of the options are documented in [hdfs-default.xml](./hdfs-default.xml)
 Once the Router is configured, it can be started:

-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs start dfsrouter

 And to stop it:

-    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop router
+    [hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --script $HADOOP_PREFIX/bin/hdfs stop dfsrouter

 ### Mount table management
@@ -179,10 +179,10 @@ For example, if we to mount `/data/app1` in the federated namespace, it is recom
 The federation admin tool supports managing the mount table.
 For example, to create three mount points and list them:

-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /tmp ns1 /tmp
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app1 ns2 /data/app1
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -add /data/app2 ns3 /data/app2
-    [hdfs]$ $HADOOP_HOME/bin/hdfs federation -ls
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /tmp ns1 /tmp
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app1 ns2 /data/app1
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app2 ns3 /data/app2
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -ls

 If a mount point is not set, the Router will map it to the default namespace `dfs.federation.router.default.nameserviceId`.
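
Once the mount points are registered, clients reach them through the Router like any other HDFS namespace. A hedged example, where `<router-host>:<rpc-port>` is a placeholder for whatever RPC address the Router is configured to listen on in this deployment:

    [hdfs]$ $HADOOP_HOME/bin/hdfs dfs -ls hdfs://<router-host>:<rpc-port>/
    [hdfs]$ $HADOOP_HOME/bin/hdfs dfs -ls hdfs://<router-host>:<rpc-port>/data/app1

The first listing shows the mount table entries created above as directories of the federated namespace; the second resolves `/data/app1` to the `ns2` subcluster behind the scenes.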