From db4d28d3bbfa5f67be482ec433a308e249a30897 Mon Sep 17 00:00:00 2001
From: Eli Collins
Date: Mon, 6 Feb 2012 08:56:06 +0000
Subject: [PATCH] HDFS-2893. svn merge -c 1240928 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1240930 13f79535-47bb-0310-9956-ffa450edef68
---
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
.../hadoop-hdfs/src/main/bin/start-dfs.sh | 15 +++------------
.../hadoop-hdfs/src/main/bin/stop-dfs.sh | 13 ++-----------
.../content/xdocs/hdfs_user_guide.xml | 4 +---
4 files changed, 9 insertions(+), 26 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ed1d069798..04acb3aef9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -219,6 +219,9 @@ Release 0.23.1 - UNRELEASED
HDFS-2889. getNumCurrentReplicas is package private but should be public on
0.23 (see HDFS-2408). (Gregory Chanan via atm)
+ HDFS-2893. The start/stop scripts don't start/stop the 2NN when
+ using the default configuration. (eli)
+
Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
index d6d03f7f8fb..d267e4cd7c3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
@@ -59,7 +59,7 @@ echo "Starting namenodes on [$NAMENODES]"
--script "$bin/hdfs" start namenode $nameStartOpt
#---------------------------------------------------------
-# datanodes (using defalut slaves file)
+# datanodes (using default slaves file)
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
echo \
@@ -74,22 +74,13 @@ fi
#---------------------------------------------------------
# secondary namenodes (if any)
-# if there are no secondary namenodes configured it returns
-# 0.0.0.0 or empty string
SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
-SECONDARY_NAMENODES=${SECONDARY_NAMENODES:='0.0.0.0'}
-if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
- echo \
- "Secondary namenodes are not configured. " \
- "Cannot start secondary namenodes."
-else
- echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" start secondarynamenode
-fi
# eof
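Note on the new start path: as the removed comment says, with no secondary
namenode explicitly configured "hdfs getconf -secondarynamenodes" reports
0.0.0.0 (or an empty string), so the old guard made start-dfs.sh skip the 2NN
entirely under the default configuration; the script now simply starts a
secondarynamenode on whatever hosts getconf reports. A minimal sketch, run by
hand and not part of the patch, to preview which hosts that will be:

    # Ask the configuration which hosts should run the secondary namenode.
    # Under default settings this prints 0.0.0.0, which in practice means the
    # daemon is started on the local machine.
    SECONDARY_NAMENODES=$("$HADOOP_PREFIX/bin/hdfs" getconf -secondarynamenodes 2>&-)
    echo "start-dfs.sh will start secondary namenodes on [$SECONDARY_NAMENODES]"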
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
index 11788e24b71..33967513c4c 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
@@ -50,22 +50,13 @@ fi
#---------------------------------------------------------
# secondary namenodes (if any)
-# if there are no secondary namenodes configured it returns
-# 0.0.0.0 or empty string
SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
-SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
-if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
- echo \
- "Secondary namenodes are not configured. " \
- "Cannot stop secondary namenodes."
-else
- echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
- "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
--config "$HADOOP_CONF_DIR" \
--hostnames "$SECONDARY_NAMENODES" \
--script "$bin/hdfs" stop secondarynamenode
-fi
# eof
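The stop path mirrors the start path: with the 0.0.0.0 guard removed,
stop-dfs.sh always asks hadoop-daemons.sh to stop the secondarynamenode on the
hosts getconf reports. A quick hand-run check, not part of the patch, that the
daemon is actually gone on a node after running stop-dfs.sh:

    # jps lists local Java processes by class name; after stop-dfs.sh there
    # should be no SecondaryNameNode JVM left on this node.
    jps | grep SecondaryNameNode || echo "no SecondaryNameNode running"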
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
index 4d2c6dd0735..976800e0350 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_user_guide.xml
@@ -253,9 +253,7 @@
The secondary NameNode merges the fsimage and the edits log files periodically
and keeps edits log size within a limit. It is usually run on a
different machine than the primary NameNode since its memory requirements
- are on the same order as the primary NameNode. The secondary
- NameNode is started by bin/start-dfs.sh on the nodes
- specified in conf/masters file.
+ are on the same order as the primary NameNode.
The start of the checkpoint process on the secondary NameNode is