HDFS-12375. Fail to start/stop journalnodes using start-dfs.sh/stop-dfs.sh. Contributed by Bharat Viswanadham.

Anu Engineer 2017-09-23 15:05:25 -07:00
parent ecc85620e8
commit 415e5a157a
2 changed files with 21 additions and 27 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh

@@ -144,22 +144,19 @@ fi
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
 
-case "${SHARED_EDITS_DIR}" in
-  qjournal://*)
-    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-    echo "Starting journal nodes [${JOURNAL_NODES}]"
-    hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
-      --workers \
-      --config "${HADOOP_CONF_DIR}" \
-      --hostnames "${JOURNAL_NODES}" \
-      --daemon start \
-      journalnode
-    (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
-  ;;
-esac
+if [[ "{$JOURNAL_NODES-}" != $'\n' ]]; then
+  echo "Starting journal nodes [${JOURNAL_NODES}]"
+  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --workers \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    --daemon start \
+    journalnode
+  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
+fi
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled

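For context: the removed lines built the JournalNode host list by string-parsing the dfs.namenode.shared.edits.dir value, while the new lines ask "hdfs getconf -journalNodes" for it directly. A minimal sketch of what the removed sed pipeline produced, using made-up hostnames (jn1/jn2/jn3.example.com are placeholders, not taken from the commit):

# Hypothetical qjournal URI for illustration only.
SHARED_EDITS_DIR='qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster'

# Strip the qjournal:// scheme and trailing journal ID, turn ';' separators
# into spaces, and drop the :8485 ports -- the removed sed expression verbatim.
JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')

echo "${JOURNAL_NODES}"
# prints: jn1.example.com jn2.example.com jn3.example.com
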
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh

@@ -100,21 +100,18 @@ fi
 #---------------------------------------------------------
 # quorumjournal nodes (if any)
 
-SHARED_EDITS_DIR=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir 2>&-)
+JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-)
 
-case "${SHARED_EDITS_DIR}" in
-  qjournal://*)
-    JOURNAL_NODES=$(echo "${SHARED_EDITS_DIR}" | sed 's,qjournal://\([^/]*\)/.*,\1,g; s/;/ /g; s/:[0-9]*//g')
-    echo "Stopping journal nodes [${JOURNAL_NODES}]"
-    hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
-      --workers \
-      --config "${HADOOP_CONF_DIR}" \
-      --hostnames "${JOURNAL_NODES}" \
-      --daemon stop \
-      journalnode
-  ;;
-esac
+if [[ "{$JOURNAL_NODES-}" != $'\n' ]]; then
+  echo "Stopping journal nodes [${JOURNAL_NODES}]"
+  hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
+    --workers \
+    --config "${HADOOP_CONF_DIR}" \
+    --hostnames "${JOURNAL_NODES}" \
+    --daemon stop \
+    journalnode
+fi
 
 #---------------------------------------------------------
 # ZK Failover controllers, if auto-HA is enabled
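
For a quick check of the two lookups on a live cluster, both getconf queries used in the scripts above can be run by hand (output depends entirely on the cluster's HA configuration, so none is shown here):

# Old source of truth: the shared edits URI, parsed with sed by the removed code.
"${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey dfs.namenode.shared.edits.dir

# New source of truth: ask for the JournalNode hosts directly.
"${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes

If the first query comes back empty (one plausible trigger is a configuration where the key is only defined with a nameservice suffix), the old case statement never matched and the JournalNodes were silently skipped when starting or stopping DFS; the new -journalNodes lookup removes that dependency.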