HADOOP-8925. Remove the packaging. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1399889 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-10-18 22:30:11 +00:00
parent 4af8761e4c
commit 57ee22e589
64 changed files with 2 additions and 6810 deletions


@@ -328,6 +328,8 @@ Release 2.0.3-alpha - Unreleased
HADOOP-8931. Add Java version to startup message. (eli)
HADOOP-8925. Remove the packaging. (eli)
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang


@@ -1,15 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/etc/hadoop


@@ -1,24 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Package: hadoop-common
Version: @version@
Section: misc
Priority: optional
Provides: hadoop-common
Architecture: all
Depends: openjdk-6-jre-headless
Maintainer: Apache Software Foundation <general@hadoop.apache.org>
Description: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.
Distribution: development
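A control template like this would typically have its @version@ token filled in at build time before the .deb is assembled. A minimal sketch, assuming hypothetical paths and a hypothetical version string:

  # substitute the version token, then build the package
  sed 's/@version@/2.0.3-alpha/' control > pkg/DEBIAN/control
  dpkg-deb --build pkg hadoop-common_2.0.3-alpha_all.deb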


@@ -1,24 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-hadoop-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop


@@ -1,19 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/usr/sbin/groupdel hadoop 2>/dev/null >/dev/null
exit 0


@@ -1,18 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -g 123 -r hadoop


@@ -1,25 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-hadoop-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop \
--uninstall


@@ -1,151 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-datanode
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Data Node server
### END INIT INFO
set -e
# /etc/init.d/hadoop-datanode: start and stop the Apache Hadoop Data Node daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
DN_USER="root"
IDENT_USER=${HADOOP_SECURE_DN_USER}
else
DN_USER="hdfs"
IDENT_USER=${DN_USER}
fi
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-datanode_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-datanode_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Data Node server not in use (/etc/hadoop/hadoop-datanode_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Data Node server" "hadoop-datanode"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid -c ${DN_USER} -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Data Node server" "hadoop-datanode"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid -c ${DN_USER} -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid -c ${DN_USER} -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-${IDENT_USER}-datanode.pid ${JAVA_HOME}/bin/java hadoop-datanode && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-datanode {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0
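As a usage sketch for these LSB init scripts (the jobtracker, namenode, and tasktracker variants below follow the same pattern), a hypothetical admin session might look like:

  # start the daemon and query it through the LSB actions
  sudo /etc/init.d/hadoop-datanode start
  sudo /etc/init.d/hadoop-datanode status
  # creating the marker file makes check_for_no_start skip any future start
  sudo touch /etc/hadoop/hadoop-datanode_not_to_be_run
  sudo /etc/init.d/hadoop-datanode restart   # skips the start and exits 0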


@@ -1,143 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-jobtracker
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Job Tracker server
### END INIT INFO
set -e
# /etc/init.d/hadoop-jobtracker: start and stop the Apache Hadoop Job Tracker daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-jobtracker_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-jobtracker_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Job Tracker server not in use (/etc/hadoop/hadoop-jobtracker_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Job Tracker server" "hadoop-jobtracker"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid ${JAVA_HOME}/bin/java hadoop-jobtracker && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-jobtracker {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0


@@ -1,155 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-namenode
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Name Node server
### END INIT INFO
set -e
# /etc/init.d/hadoop-namenode: start and stop the Apache Hadoop Name Node daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-namenode_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-namenode_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Name Node server not in use (/etc/hadoop/hadoop-namenode_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
format() {
sudo -u hdfs ${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} namenode -format
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Name Node server" "hadoop-namenode"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Name Node server" "hadoop-namenode"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
format)
log_daemon_msg "Formatting Apache Hadoop Name Node" "hadoop-namenode"
format
if [ $? -eq 0 ]; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid ${JAVA_HOME}/bin/java hadoop-namenode && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-namenode {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0


@@ -1,143 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-tasktracker
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Task Tracker server
### END INIT INFO
set -e
# /etc/init.d/hadoop-tasktracker: start and stop the Apache Hadoop Task Tracker daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-tasktracker_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-tasktracker_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Task Tracker server not in use (/etc/hadoop/hadoop-tasktracker_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Task Tracker server" "hadoop-tasktracker"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid ${JAVA_HOME}/bin/java hadoop-tasktracker && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-tasktracker {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0


@@ -1,123 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
script="$(basename -- "$this")"
this="$bin/$script"
if [ "$HADOOP_HOME" != "" ]; then
echo "Warning: \$HADOOP_HOME is deprecated."
echo
fi
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Required parameters:
--config /etc/hadoop Location of Hadoop configuration file
-u <username> Create user on HDFS
Optional parameters:
-h Display this message
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
--super-user=hdfs Set super user id
--super-user-keytab=/etc/security/keytabs/hdfs.keytab Set super user keytab location
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'kerberos-realm:' \
-l 'super-user:' \
-l 'super-user-keytab:' \
-o 'h' \
-o 'u' \
-- "$@")
if [ $? != 0 ] ; then
usage
exit 1
fi
create_user() {
if [ "${SETUP_USER}" = "" ]; then
return
fi
HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
export HADOOP_PREFIX
export HADOOP_CONF_DIR
export JAVA_HOME
export SETUP_USER=${SETUP_USER}
export SETUP_PATH=/user/${SETUP_USER}
if [ ! "${KERBEROS_REALM}" = "" ]; then
# locate kinit cmd
if [ -e /etc/lsb-release ]; then
KINIT_CMD="/usr/bin/kinit -kt ${HDFS_USER_KEYTAB} ${HADOOP_HDFS_USER}"
else
KINIT_CMD="/usr/kerberos/bin/kinit -kt ${HDFS_USER_KEYTAB} ${HADOOP_HDFS_USER}"
fi
su -c "${KINIT_CMD}" ${HADOOP_HDFS_USER}
fi
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} fs -mkdir ${SETUP_PATH}" ${HADOOP_HDFS_USER}
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} fs -chown ${SETUP_USER}:${SETUP_USER} ${SETUP_PATH}" ${HADOOP_HDFS_USER}
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} fs -chmod 711 ${SETUP_PATH}" ${HADOOP_HDFS_USER}
if [ "$?" == "0" ]; then
echo "User directory has been setup: ${SETUP_PATH}"
fi
}
eval set -- "${OPTS}"
while true; do
case "$1" in
-u)
shift
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
;;
--super-user)
HADOOP_HDFS_USER=$2; shift 2
;;
--super-user-keytab)
HDFS_USER_KEYTAB=$2; shift 2
;;
-h)
usage
;;
--)
while shift; do
SETUP_USER=$1
create_user
done
break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
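A hypothetical invocation, assuming the script is on the PATH: each username listed after -u gets an HDFS home directory created, chowned, and chmodded to 711:

  # creates /user/alice and /user/bob as the hdfs superuser
  hadoop-create-user.sh --super-user=hdfs -u alice bob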


@@ -1,142 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
this="${BASH_SOURCE-$0}"
bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
script="$(basename -- "$this")"
this="$bin/$script"
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Required parameters:
--config /etc/hadoop Location of Hadoop configuration file
--apps=<comma-separated list of app:user pairs, e.g. hcat:hcat,hbase,hive:user> Apps you want to set up on HDFS
If a user is not specified, the app name
will be used as the user name as well
Optional parameters:
-h Display this message
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
--super-user=hdfs Set super user id
--super-user-keytab=/etc/security/keytabs/hdfs.keytab Set super user keytab location
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'kerberos-realm:' \
-l 'super-user:' \
-l 'super-user-keytab:' \
-l 'apps:' \
-o 'h' \
-- "$@")
if [ $? != 0 ] ; then
usage
exit 1
fi
function setup_apps
{
if [ -z "$APPS" ]
then
usage
fi
#if super user is not set default to hdfs
HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
if [ ! "${KERBEROS_REALM}" = "" ]; then
# locate kinit cmd
if [ -e /etc/lsb-release ]; then
KINIT_CMD="/usr/bin/kinit -kt ${HDFS_USER_KEYTAB} ${HADOOP_HDFS_USER}"
else
KINIT_CMD="/usr/kerberos/bin/kinit -kt ${HDFS_USER_KEYTAB} ${HADOOP_HDFS_USER}"
fi
su -c "${KINIT_CMD}" ${HADOOP_HDFS_USER}
fi
#process each app
oldIFS=$IFS
IFS=','
for app in $APPS
do
IFS=":"
arr=($app)
app=${arr[0]}
user=${arr[1]}
IFS=','
#if user is empty, default it to app
if [ -z $user ]
then
user=$app
fi
path="/apps/${app}"
#create the dir
cmd="su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir ${path}' ${HADOOP_HDFS_USER}"
echo $cmd
eval $cmd
#make owner to be the app
cmd="su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chown ${user} ${path}' ${HADOOP_HDFS_USER}"
echo $cmd
eval $cmd
if [ "$?" == "0" ]; then
echo "App directory has been setup: ${path}"
fi
done
IFS=$oldIFS
}
eval set -- "${OPTS}"
while true; do
case "$1" in
--apps)
APPS=$2; shift 2
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
;;
--super-user)
HADOOP_HDFS_USER=$2; shift 2
;;
--super-user-keytab)
HDFS_USER_KEYTAB=$2; shift 2
;;
-h)
usage
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
setup_apps
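A hypothetical run against a live HDFS, assuming the superuser keytab is already in place when Kerberos is enabled:

  # creates /apps/hbase owned by hbase, and /apps/hive owned by hiveuser
  hadoop-setup-applications.sh --super-user=hdfs --apps=hbase,hive:hiveuser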


@@ -1,707 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
this="${BASH_SOURCE-$0}"
HADOOP_DEFAULT_PREFIX=`dirname "$this"`/..
HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
export HADOOP_PREFIX
usage() {
echo "
usage: $0 <parameters>
Optional parameters:
--auto Setup path and configuration automatically
--default Setup configuration as default
--conf-dir=/etc/hadoop Set configuration directory
--datanode-dir=/var/lib/hadoop/hdfs/datanode Set datanode directory
--group=hadoop Set Hadoop group name
-h Display this message
--hdfs-user=hdfs Set HDFS user
--jobtracker-host=hostname Set jobtracker host
--namenode-host=hostname Set namenode host
--secondarynamenode-host=hostname Set secondary namenode host
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
--kinit-location=/usr/kerberos/bin/kinit Set kinit location
--keytab-dir=/etc/security/keytabs Set keytab directory
--log-dir=/var/log/hadoop Set log directory
--pid-dir=/var/run/hadoop Set pid directory
--hdfs-dir=/var/lib/hadoop/hdfs Set HDFS directory
--hdfs-user-keytab=/home/hdfs/hdfs.keytab Set HDFS user key tab
--mapred-dir=/var/lib/hadoop/mapred Set mapreduce directory
--mapreduce-user=mr Set mapreduce user
--mapreduce-user-keytab=/home/mr/hdfs.keytab Set mapreduce user key tab
--namenode-dir=/var/lib/hadoop/hdfs/namenode Set namenode directory
--replication=3 Set replication factor
--taskscheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler Set task scheduler
--datanodes=hostname1,hostname2,... Set the datanodes
--tasktrackers=hostname1,hostname2,... Set the tasktrackers
--dfs-webhdfs-enabled=false|true Enable webhdfs
--dfs-support-append=false|true Enable append
--hadoop-proxy-users='user1:groups:hosts;user2:groups:hosts' Setup proxy users for hadoop
--hbase-user=hbase User which hbase is running as. Defaults to hbase
--mapreduce-cluster-mapmemory-mb=memory Virtual memory of a map slot for the MR framework. Defaults to -1
--mapreduce-cluster-reducememory-mb=memory Virtual memory of a reduce slot for the MR framework. Defaults to -1
--mapreduce-jobtracker-maxmapmemory-mb=memory Maximum virtual memory of a single map task. Defaults to -1
This value should be set to (mapreduce.cluster.mapmemory.mb * mapreduce.tasktracker.map.tasks.maximum)
--mapreduce-jobtracker-maxreducememory-mb=memory Maximum virtual memory of a single reduce task. Defaults to -1
This value should be set to (mapreduce.cluster.reducememory.mb * mapreduce.tasktracker.reduce.tasks.maximum)
--mapreduce-map-memory-mb=memory Virtual memory of a single map slot for a job. Defaults to -1
This value should be <= mapred.cluster.max.map.memory.mb
--mapreduce-reduce-memory-mb=memory Virtual memory of a single reduce slot for a job. Defaults to -1
This value should be <= mapred.cluster.max.reduce.memory.mb
--dfs-datanode-dir-perm=700 Set the permission for the datanode data directories. Defaults to 700
--dfs-block-local-path-access-user=user User for which you want to enable shortcircuit read.
--dfs-client-read-shortcircuit=true/false Enable shortcircuit read for the client. Will default to true if the shortcircuit user is set.
--dfs-client-read-shortcircuit-skip-checksum=false/true Disable checking of checksum when shortcircuit read is taking place. Defaults to false.
"
exit 1
}
check_permission() {
TARGET=$1
OWNER="0"
RESULT=0
while [ "$TARGET" != "/" ]; do
if [ "`uname`" = "Darwin" ]; then
OWNER=`stat -f %u $TARGET`
else
OWNER=`stat -c %u $TARGET`
fi
if [ "$OWNER" != "0" ]; then
RESULT=1
break
fi
TARGET=`dirname $TARGET`
done
return $RESULT
}
template_generator() {
REGEX='(\$\{[a-zA-Z_][a-zA-Z_0-9]*\})'
if [ -e $2 ]; then
mv -f $2 "$2.bak"
fi
cat $1 |
while read line ; do
while [[ "$line" =~ $REGEX ]] ; do
LHS=${BASH_REMATCH[1]}
RHS="$(eval echo "\"$LHS\"")"
line=${line//$LHS/$RHS}
done
echo $line >> $2
done
}
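As a sketch of what template_generator does, with hypothetical file names: every ${VAR} reference in the template is expanded from the caller's environment, and an existing destination is first moved aside to a .bak file:

  HADOOP_NN_HOST=nn.example.com
  echo 'fs.default.name=hdfs://${HADOOP_NN_HOST}:8020' > /tmp/tpl
  template_generator /tmp/tpl /tmp/out
  cat /tmp/out   # fs.default.name=hdfs://nn.example.com:8020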
#########################################
# Function to modify a value of a field in an xml file
# Params: $1 is the file with full path; $2 is the property; $3 is the new value; $4 is an optional description; $5 is an optional final flag
#########################################
function addPropertyToXMLConf
{
#read the file name with full path
local file=$1
#get the property name
local property=$2
#get what value should be set for that
local propValue=$3
#get the description
local desc=$4
#get the value for the final tag
local finalVal=$5
#create the property text, make sure the / are escaped
propText="<property>\n<name>$property<\/name>\n<value>$propValue<\/value>\n"
#if description is not empty add it
if [ ! -z "$desc" ]
then
propText="${propText}<description>$desc<\/description>\n"
fi
#if final is not empty add it
if [ ! -z "$finalVal" ]
then
propText="${propText}<final>$finalVal<\/final>\n"
fi
#add the ending tag
propText="${propText}<\/property>\n"
#add the property to the file
endText="<\/configuration>"
#add the text using sed at the end of the file
sed -i "s|$endText|$propText$endText|" $file
}
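A hypothetical call, inserting one property just before the closing </configuration> tag of an existing site file:

  # adds dfs.replication=3 with a description and marks it final
  addPropertyToXMLConf "${HADOOP_CONF_DIR}/hdfs-site.xml" \
    "dfs.replication" "3" "block-replication-factor" "true"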
##########################################
# Function to set up the short circuit read settings
#########################################
function setupShortCircuitRead
{
local conf_file="${HADOOP_CONF_DIR}/hdfs-site.xml"
#if the shortcircuit user is not set then return
if [ -z "$DFS_BLOCK_LOCAL_PATH_ACCESS_USER" ]
then
return
fi
#set the defaults if values not present
DFS_CLIENT_READ_SHORTCIRCUIT=${DFS_CLIENT_READ_SHORTCIRCUIT:-false}
DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM=${DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM:-false}
#add the user to the conf file
addPropertyToXMLConf "$conf_file" "dfs.block.local-path-access.user" "$DFS_BLOCK_LOCAL_PATH_ACCESS_USER"
addPropertyToXMLConf "$conf_file" "dfs.client.read.shortcircuit" "$DFS_CLIENT_READ_SHORTCIRCUIT"
addPropertyToXMLConf "$conf_file" "dfs.client.read.shortcircuit.skip.checksum" "$DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM"
}
##########################################
# Function to set up the proxy user settings
#########################################
function setupProxyUsers
{
local conf_file="${HADOOP_CONF_DIR}/core-site.xml"
#if hadoop proxy users are set, setup hadoop proxy
if [ ! -z "$HADOOP_PROXY_USERS" ]
then
oldIFS=$IFS
IFS=';'
#process each proxy config
for proxy in $HADOOP_PROXY_USERS
do
#get the user, group and hosts information for each proxy
IFS=':'
arr=($proxy)
user="${arr[0]}"
groups="${arr[1]}"
hosts="${arr[2]}"
#determine the property names and values
proxy_groups_property="hadoop.proxyuser.${user}.groups"
proxy_groups_val="$groups"
addPropertyToXMLConf "$conf_file" "$proxy_groups_property" "$proxy_groups_val"
proxy_hosts_property="hadoop.proxyuser.${user}.hosts"
proxy_hosts_val="$hosts"
addPropertyToXMLConf "$conf_file" "$proxy_hosts_property" "$proxy_hosts_val"
IFS=';'
done
IFS=$oldIFS
fi
}
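For example, with the hypothetical argument --hadoop-proxy-users='oozie:users:oozie.example.com', the loop above would append these properties to core-site.xml:

  hadoop.proxyuser.oozie.groups = users
  hadoop.proxyuser.oozie.hosts  = oozie.example.com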
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'auto' \
-l 'java-home:' \
-l 'conf-dir:' \
-l 'default' \
-l 'group:' \
-l 'hdfs-dir:' \
-l 'namenode-dir:' \
-l 'datanode-dir:' \
-l 'mapred-dir:' \
-l 'namenode-host:' \
-l 'secondarynamenode-host:' \
-l 'jobtracker-host:' \
-l 'log-dir:' \
-l 'pid-dir:' \
-l 'replication:' \
-l 'taskscheduler:' \
-l 'hdfs-user:' \
-l 'hdfs-user-keytab:' \
-l 'mapreduce-user:' \
-l 'mapreduce-user-keytab:' \
-l 'keytab-dir:' \
-l 'kerberos-realm:' \
-l 'kinit-location:' \
-l 'datanodes:' \
-l 'tasktrackers:' \
-l 'dfs-webhdfs-enabled:' \
-l 'hadoop-proxy-users:' \
-l 'dfs-support-append:' \
-l 'hbase-user:' \
-l 'mapreduce-cluster-mapmemory-mb:' \
-l 'mapreduce-cluster-reducememory-mb:' \
-l 'mapreduce-jobtracker-maxmapmemory-mb:' \
-l 'mapreduce-jobtracker-maxreducememory-mb:' \
-l 'mapreduce-map-memory-mb:' \
-l 'mapreduce-reduce-memory-mb:' \
-l 'dfs-datanode-dir-perm:' \
-l 'dfs-block-local-path-access-user:' \
-l 'dfs-client-read-shortcircuit:' \
-l 'dfs-client-read-shortcircuit-skip-checksum:' \
-o 'h' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
# Make sure the HADOOP_LOG_DIR is not picked up from user environment.
unset HADOOP_LOG_DIR
eval set -- "${OPTS}"
while true ; do
case "$1" in
--auto)
AUTOSETUP=1
AUTOMATED=1
shift
;;
--java-home)
JAVA_HOME=$2; shift 2
AUTOMATED=1
;;
--conf-dir)
HADOOP_CONF_DIR=$2; shift 2
AUTOMATED=1
;;
--default)
AUTOMATED=1; shift
;;
--group)
HADOOP_GROUP=$2; shift 2
AUTOMATED=1
;;
-h)
usage
;;
--hdfs-dir)
HADOOP_HDFS_DIR=$2; shift 2
AUTOMATED=1
;;
--namenode-dir)
HADOOP_NN_DIR=$2; shift 2
AUTOMATED=1
;;
--datanode-dir)
HADOOP_DN_DIR=$2; shift 2
AUTOMATED=1
;;
--mapred-dir)
HADOOP_MAPRED_DIR=$2; shift 2
AUTOMATED=1
;;
--namenode-host)
HADOOP_NN_HOST=$2; shift 2
AUTOMATED=1
;;
--secondarynamenode-host)
HADOOP_SNN_HOST=$2; shift 2
AUTOMATED=1
;;
--jobtracker-host)
HADOOP_JT_HOST=$2; shift 2
AUTOMATED=1
;;
--log-dir)
HADOOP_LOG_DIR=$2; shift 2
AUTOMATED=1
;;
--pid-dir)
HADOOP_PID_DIR=$2; shift 2
AUTOMATED=1
;;
--replication)
HADOOP_REPLICATION=$2; shift 2
AUTOMATED=1
;;
--taskscheduler)
HADOOP_TASK_SCHEDULER=$2; shift 2
AUTOMATED=1
;;
--hdfs-user)
HADOOP_HDFS_USER=$2; shift 2
AUTOMATED=1
;;
--mapreduce-user)
HADOOP_MR_USER=$2; shift 2
AUTOMATED=1
;;
--keytab-dir)
KEYTAB_DIR=$2; shift 2
AUTOMATED=1
;;
--hdfs-user-keytab)
HDFS_KEYTAB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-user-keytab)
MR_KEYTAB=$2; shift 2
AUTOMATED=1
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
SECURITY_TYPE="kerberos"
AUTOMATED=1
;;
--kinit-location)
KINIT=$2; shift 2
AUTOMATED=1
;;
--datanodes)
DATANODES=$2; shift 2
AUTOMATED=1
DATANODES=$(echo $DATANODES | tr ',' ' ')
;;
--tasktrackers)
TASKTRACKERS=$2; shift 2
AUTOMATED=1
TASKTRACKERS=$(echo $TASKTRACKERS | tr ',' ' ')
;;
--dfs-webhdfs-enabled)
DFS_WEBHDFS_ENABLED=$2; shift 2
AUTOMATED=1
;;
--hadoop-proxy-users)
HADOOP_PROXY_USERS=$2; shift 2
AUTOMATED=1
;;
--dfs-support-append)
DFS_SUPPORT_APPEND=$2; shift 2
AUTOMATED=1
;;
--hbase-user)
HBASE_USER=$2; shift 2
AUTOMATED=1
;;
--mapreduce-cluster-mapmemory-mb)
MAPREDUCE_CLUSTER_MAPMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-cluster-reducememory-mb)
MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-jobtracker-maxmapmemory-mb)
MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-jobtracker-maxreducememory-mb)
MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-map-memory-mb)
MAPREDUCE_MAP_MEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--mapreduce-reduce-memory-mb)
MAPREDUCE_REDUCE_MEMORY_MB=$2; shift 2
AUTOMATED=1
;;
--dfs-datanode-dir-perm)
DFS_DATANODE_DIR_PERM=$2; shift 2
AUTOMATED=1
;;
--dfs-block-local-path-access-user)
DFS_BLOCK_LOCAL_PATH_ACCESS_USER=$2; shift 2
AUTOMATED=1
;;
--dfs-client-read-shortcircuit)
DFS_CLIENT_READ_SHORTCIRCUIT=$2; shift 2
AUTOMATED=1
;;
--dfs-client-read-shortcircuit-skip-checksum)
DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM=$2; shift 2
AUTOMATED=1
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
AUTOSETUP=${AUTOSETUP:-1}
JAVA_HOME=${JAVA_HOME:-/usr/java/default}
HADOOP_GROUP=${HADOOP_GROUP:-hadoop}
HADOOP_NN_HOST=${HADOOP_NN_HOST:-`hostname`}
HADOOP_SNN_HOST=${HADOOP_SNN_HOST:-`hostname`}
HADOOP_NN_DIR=${HADOOP_NN_DIR:-/var/lib/hadoop/hdfs/namenode}
HADOOP_DN_DIR=${HADOOP_DN_DIR:-/var/lib/hadoop/hdfs/datanode}
HADOOP_JT_HOST=${HADOOP_JT_HOST:-`hostname`}
HADOOP_HDFS_DIR=${HADOOP_HDFS_DIR:-/var/lib/hadoop/hdfs}
HADOOP_MAPRED_DIR=${HADOOP_MAPRED_DIR:-/var/lib/hadoop/mapred}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-/var/log/hadoop}
HADOOP_PID_DIR=${HADOOP_PID_DIR:-/var/run/hadoop}
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop}
HADOOP_REPLICATION=${HADOOP_REPLICATION:-3}
HADOOP_TASK_SCHEDULER=${HADOOP_TASK_SCHEDULER:-org.apache.hadoop.mapred.JobQueueTaskScheduler}
HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
HADOOP_MR_USER=${HADOOP_MR_USER:-mr}
DFS_WEBHDFS_ENABLED=${DFS_WEBHDFS_ENABLED:-false}
DFS_SUPPORT_APPEND=${DFS_SUPPORT_APPEND:-false}
HBASE_USER=${HBASE_USER:-hbase}
MAPREDUCE_CLUSTER_MAPMEMORY_MB=${MAPREDUCE_CLUSTER_MAPMEMORY_MB:--1}
MAPREDUCE_CLUSTER_REDUCEMEMORY_MB=${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB:--1}
MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB:--1}
MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB=${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB:--1}
MAPREDUCE_MAP_MEMORY_MB=${MAPREDUCE_MAP_MEMORY_MB:--1}
MAPREDUCE_REDUCE_MEMORY_MB=${MAPREDUCE_REDUCE_MEMORY_MB:--1}
KEYTAB_DIR=${KEYTAB_DIR:-/etc/security/keytabs}
HDFS_KEYTAB=${HDFS_KEYTAB:-/home/hdfs/hdfs.keytab}
MR_KEYTAB=${MR_KEYTAB:-/home/mr/mr.keytab}
KERBEROS_REALM=${KERBEROS_REALM:-KERBEROS.EXAMPLE.COM}
SECURITY_TYPE=${SECURITY_TYPE:-simple}
KINIT=${KINIT:-/usr/kerberos/bin/kinit}
#default the data dir perm to 700
DFS_DATANODE_DIR_PERM=${DFS_DATANODE_DIR_PERM:-700}
if [ "${SECURITY_TYPE}" = "kerberos" ]; then
TASK_CONTROLLER="org.apache.hadoop.mapred.LinuxTaskController"
HADOOP_DN_ADDR="0.0.0.0:1019"
HADOOP_DN_HTTP_ADDR="0.0.0.0:1022"
SECURITY="true"
HADOOP_SECURE_DN_USER=${HADOOP_HDFS_USER}
else
TASK_CONTROLLER="org.apache.hadoop.mapred.DefaultTaskController"
HADOOP_DN_ADDR="0.0.0.0:50010"
HADOOP_DN_HTTP_ADDR="0.0.0.0:50075"
SECURITY="false"
HADOOP_SECURE_DN_USER=""
fi
#unset env vars
unset HADOOP_CLIENT_OPTS HADOOP_NAMENODE_OPTS HADOOP_DATANODE_OPTS HADOOP_SECONDARYNAMENODE_OPTS HADOOP_JAVA_PLATFORM_OPTS
if [ "${AUTOMATED}" != "1" ]; then
echo "Setup Hadoop Configuration"
echo
echo -n "Where would you like to put config directory? (${HADOOP_CONF_DIR}) "
read USER_HADOOP_CONF_DIR
echo -n "Where would you like to put log directory? (${HADOOP_LOG_DIR}) "
read USER_HADOOP_LOG_DIR
echo -n "Where would you like to put pid directory? (${HADOOP_PID_DIR}) "
read USER_HADOOP_PID_DIR
echo -n "What is the host of the namenode? (${HADOOP_NN_HOST}) "
read USER_HADOOP_NN_HOST
echo -n "Where would you like to put namenode data directory? (${HADOOP_NN_DIR}) "
read USER_HADOOP_NN_DIR
echo -n "Where would you like to put datanode data directory? (${HADOOP_DN_DIR}) "
read USER_HADOOP_DN_DIR
echo -n "What is the host of the jobtracker? (${HADOOP_JT_HOST}) "
read USER_HADOOP_JT_HOST
echo -n "Where would you like to put jobtracker/tasktracker data directory? (${HADOOP_MAPRED_DIR}) "
read USER_HADOOP_MAPRED_DIR
echo -n "Where is JAVA_HOME directory? (${JAVA_HOME}) "
read USER_JAVA_HOME
echo -n "Would you like to create directories/copy conf files to localhost? (Y/n) "
read USER_AUTOSETUP
echo
JAVA_HOME=${USER_JAVA_HOME:-$JAVA_HOME}
HADOOP_NN_HOST=${USER_HADOOP_NN_HOST:-$HADOOP_NN_HOST}
HADOOP_NN_DIR=${USER_HADOOP_NN_DIR:-$HADOOP_NN_DIR}
HADOOP_DN_DIR=${USER_HADOOP_DN_DIR:-$HADOOP_DN_DIR}
HADOOP_JT_HOST=${USER_HADOOP_JT_HOST:-$HADOOP_JT_HOST}
HADOOP_HDFS_DIR=${USER_HADOOP_HDFS_DIR:-$HADOOP_HDFS_DIR}
HADOOP_MAPRED_DIR=${USER_HADOOP_MAPRED_DIR:-$HADOOP_MAPRED_DIR}
HADOOP_TASK_SCHEDULER=${HADOOP_TASK_SCHEDULER:-org.apache.hadoop.mapred.JobQueueTaskScheduler}
HADOOP_LOG_DIR=${USER_HADOOP_LOG_DIR:-$HADOOP_LOG_DIR}
HADOOP_PID_DIR=${USER_HADOOP_PID_DIR:-$HADOOP_PID_DIR}
HADOOP_CONF_DIR=${USER_HADOOP_CONF_DIR:-$HADOOP_CONF_DIR}
AUTOSETUP=${USER_AUTOSETUP:-y}
echo "Review your choices:"
echo
echo "Config directory : ${HADOOP_CONF_DIR}"
echo "Log directory : ${HADOOP_LOG_DIR}"
echo "PID directory : ${HADOOP_PID_DIR}"
echo "Namenode host : ${HADOOP_NN_HOST}"
echo "Namenode directory : ${HADOOP_NN_DIR}"
echo "Datanode directory : ${HADOOP_DN_DIR}"
echo "Jobtracker host : ${HADOOP_JT_HOST}"
echo "Mapreduce directory : ${HADOOP_MAPRED_DIR}"
echo "Task scheduler : ${HADOOP_TASK_SCHEDULER}"
echo "JAVA_HOME directory : ${JAVA_HOME}"
echo "Create dirs/copy conf files : ${AUTOSETUP}"
echo
echo -n "Proceed with generate configuration? (y/N) "
read CONFIRM
if [ "${CONFIRM}" != "y" ]; then
echo "User aborted setup, exiting..."
exit 1
fi
fi
if [ "${AUTOSETUP}" == "1" -o "${AUTOSETUP}" == "y" ]; then
if [ -d ${KEYTAB_DIR} ]; then
chmod 700 ${KEYTAB_DIR}/*
chown ${HADOOP_MR_USER}:${HADOOP_GROUP} ${KEYTAB_DIR}/[jt]t.service.keytab
chown ${HADOOP_HDFS_USER}:${HADOOP_GROUP} ${KEYTAB_DIR}/[dns]n.service.keytab
fi
chmod 755 -R ${HADOOP_PREFIX}/sbin/*hadoop*
chmod 755 -R ${HADOOP_PREFIX}/bin/hadoop
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-${HADOOP_PREFIX}/libexec}
chmod 755 -R ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh
mkdir -p /home/${HADOOP_MR_USER}
chown ${HADOOP_MR_USER}:${HADOOP_GROUP} /home/${HADOOP_MR_USER}
HDFS_DIR=`echo ${HADOOP_HDFS_DIR} | sed -e 's/,/ /g'`
mkdir -p ${HDFS_DIR}
if [ -e ${HADOOP_NN_DIR} ]; then
rm -rf ${HADOOP_NN_DIR}
fi
DATANODE_DIR=`echo ${HADOOP_DN_DIR} | sed -e 's/,/ /g'`
mkdir -p ${DATANODE_DIR}
MAPRED_DIR=`echo ${HADOOP_MAPRED_DIR} | sed -e 's/,/ /g'`
mkdir -p ${MAPRED_DIR}
mkdir -p ${HADOOP_CONF_DIR}
check_permission ${HADOOP_CONF_DIR}
if [ $? == 1 ]; then
echo "Full path to ${HADOOP_CONF_DIR} should be owned by root."
exit 1
fi
mkdir -p ${HADOOP_LOG_DIR}
#create the log sub dir for diff users
mkdir -p ${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
mkdir -p ${HADOOP_LOG_DIR}/${HADOOP_MR_USER}
mkdir -p ${HADOOP_PID_DIR}
chown ${HADOOP_HDFS_USER}:${HADOOP_GROUP} ${HDFS_DIR}
chown ${HADOOP_HDFS_USER}:${HADOOP_GROUP} ${DATANODE_DIR}
chmod 700 -R ${DATANODE_DIR}
chown ${HADOOP_MR_USER}:${HADOOP_GROUP} ${MAPRED_DIR}
chown ${HADOOP_HDFS_USER}:${HADOOP_GROUP} ${HADOOP_LOG_DIR}
chmod 775 ${HADOOP_LOG_DIR}
chmod 775 ${HADOOP_PID_DIR}
chown root:${HADOOP_GROUP} ${HADOOP_PID_DIR}
#change the permission and the owner
chmod 755 ${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
chown ${HADOOP_HDFS_USER}:${HADOOP_GROUP} ${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
chmod 755 ${HADOOP_LOG_DIR}/${HADOOP_MR_USER}
chown ${HADOOP_MR_USER}:${HADOOP_GROUP} ${HADOOP_LOG_DIR}/${HADOOP_MR_USER}
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/core-site.xml ${HADOOP_CONF_DIR}/core-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hdfs-site.xml ${HADOOP_CONF_DIR}/hdfs-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/mapred-site.xml ${HADOOP_CONF_DIR}/mapred-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-env.sh ${HADOOP_CONF_DIR}/hadoop-env.sh
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-policy.xml ${HADOOP_CONF_DIR}/hadoop-policy.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/commons-logging.properties ${HADOOP_CONF_DIR}/commons-logging.properties
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/mapred-queue-acls.xml ${HADOOP_CONF_DIR}/mapred-queue-acls.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
#set up the proxy users
setupProxyUsers
#setup short circuit read
setupShortCircuitRead
#set the owner of the hadoop dir to root
chown root ${HADOOP_PREFIX}
chown root:${HADOOP_GROUP} ${HADOOP_CONF_DIR}/hadoop-env.sh
chmod 755 ${HADOOP_CONF_DIR}/hadoop-env.sh
#set taskcontroller
chown root:${HADOOP_GROUP} ${HADOOP_CONF_DIR}/taskcontroller.cfg
chmod 400 ${HADOOP_CONF_DIR}/taskcontroller.cfg
chown root:${HADOOP_GROUP} ${HADOOP_PREFIX}/bin/task-controller
chmod 6050 ${HADOOP_PREFIX}/bin/task-controller
#generate the slaves file and include and exclude files for hdfs and mapred
echo '' > ${HADOOP_CONF_DIR}/slaves
echo '' > ${HADOOP_CONF_DIR}/dfs.include
echo '' > ${HADOOP_CONF_DIR}/dfs.exclude
echo '' > ${HADOOP_CONF_DIR}/mapred.include
echo '' > ${HADOOP_CONF_DIR}/mapred.exclude
for dn in $DATANODES
do
echo $dn >> ${HADOOP_CONF_DIR}/slaves
echo $dn >> ${HADOOP_CONF_DIR}/dfs.include
done
for tt in $TASKTRACKERS
do
echo $tt >> ${HADOOP_CONF_DIR}/mapred.include
done
echo "Configuration setup is completed."
if [[ "$HADOOP_NN_HOST" =~ "`hostname`" ]]; then
echo "Proceed to run hadoop-setup-hdfs.sh on namenode."
fi
else
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/core-site.xml ${HADOOP_CONF_DIR}/core-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hdfs-site.xml ${HADOOP_CONF_DIR}/hdfs-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/mapred-site.xml ${HADOOP_CONF_DIR}/mapred-site.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-env.sh ${HADOOP_CONF_DIR}/hadoop-env.sh
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-policy.xml ${HADOOP_CONF_DIR}/hadoop-policy.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/commons-logging.properties ${HADOOP_CONF_DIR}/commons-logging.properties
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/mapred-queue-acls.xml ${HADOOP_CONF_DIR}/mapred-queue-acls.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties
template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
#set up the proxy users
setupProxyUsers
#setup short circuit read
setupShortCircuitRead
chown root:${HADOOP_GROUP} ${HADOOP_CONF_DIR}/hadoop-env.sh
chmod 755 ${HADOOP_CONF_DIR}/hadoop-env.sh
#set taskcontroller
chown root:${HADOOP_GROUP} ${HADOOP_CONF_DIR}/taskcontroller.cfg
chmod 400 ${HADOOP_CONF_DIR}/taskcontroller.cfg
chown root:${HADOOP_GROUP} ${HADOOP_PREFIX}/bin/task-controller
chmod 6050 ${HADOOP_PREFIX}/bin/task-controller
#generate the slaves file and include and exclude files for hdfs and mapred
echo '' > ${HADOOP_CONF_DIR}/slaves
echo '' > ${HADOOP_CONF_DIR}/dfs.include
echo '' > ${HADOOP_CONF_DIR}/dfs.exclude
echo '' > ${HADOOP_CONF_DIR}/mapred.include
echo '' > ${HADOOP_CONF_DIR}/mapred.exclude
for dn in $DATANODES
do
echo $dn >> ${HADOOP_CONF_DIR}/slaves
echo $dn >> ${HADOOP_CONF_DIR}/dfs.include
done
for tt in $TASKTRACKERS
do
echo $tt >> ${HADOOP_CONF_DIR}/mapred.include
done
echo
echo "Configuration file has been generated in:"
echo
echo "${HADOOP_CONF_DIR}/core-site.xml"
echo "${HADOOP_CONF_DIR}/hdfs-site.xml"
echo "${HADOOP_CONF_DIR}/mapred-site.xml"
echo "${HADOOP_CONF_DIR}/hadoop-env.sh"
echo "${HADOOP_CONF_DIR}/hadoop-policy.xml"
echo "${HADOOP_CONF_DIR}/commons-logging.properties"
echo "${HADOOP_CONF_DIR}/taskcontroller.cfg"
echo "${HADOOP_CONF_DIR}/capacity-scheduler.xml"
echo "${HADOOP_CONF_DIR}/log4j.properties"
echo "${HADOOP_CONF_DIR}/hadoop-metrics2.properties"
echo
echo " to ${HADOOP_CONF_DIR} on all nodes, and proceed to run hadoop-setup-hdfs.sh on namenode."
fi
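A hypothetical non-interactive invocation, generating a configuration for a small cluster (host names are placeholders):

  hadoop-setup-conf.sh --auto \
    --conf-dir=/etc/hadoop \
    --namenode-host=nn.example.com \
    --jobtracker-host=jt.example.com \
    --datanodes=dn1.example.com,dn2.example.com \
    --tasktrackers=dn1.example.com,dn2.example.com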


@@ -1,157 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Optional parameters:
--format Force namenode format
--group=hadoop Set Hadoop group
-h Display this message
--hdfs-user=hdfs Set HDFS user
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
--hdfs-user-keytab=/home/hdfs/hdfs.keytab Set HDFS user key tab
--mapreduce-user=mr Set mapreduce user
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'format' \
-l 'hdfs-user:' \
-l 'hdfs-user-keytab:' \
-l 'mapreduce-user:' \
-l 'kerberos-realm:' \
-o 'h' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--format)
FORMAT_NAMENODE=1; shift
AUTOMATED=1
;;
--group)
HADOOP_GROUP=$2; shift 2
AUTOMATED=1
;;
--hdfs-user)
HADOOP_HDFS_USER=$2; shift 2
AUTOMATED=1
;;
--mapreduce-user)
HADOOP_MR_USER=$2; shift 2
AUTOMATED=1
;;
--yarn-user)
HADOOP_YARN_USER=$2; shift 2
AUTOMATED=1
;;
--hdfs-user-keytab)
HDFS_KEYTAB=$2; shift 2
AUTOMATED=1
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
AUTOMATED=1
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
HADOOP_GROUP=${HADOOP_GROUP:-hadoop}
HADOOP_HDFS_USER=${HADOOP_HDFS_USER:-hdfs}
HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
HADOOP_MAPREDUCE_USER=${HADOOP_MR_USER:-mapred}
if [ "${KERBEROS_REALM}" != "" ]; then
# Determine kerberos location based on Linux distro.
if [ -e /etc/lsb-release ]; then
KERBEROS_BIN=/usr/bin
else
KERBEROS_BIN=/usr/kerberos/bin
fi
kinit_cmd="${KERBEROS_BIN}/kinit -k -t ${HDFS_KEYTAB} ${HADOOP_HDFS_USER}"
su -c "${kinit_cmd}" ${HADOOP_HDFS_USER}
fi
echo "Setup Hadoop Distributed File System"
echo
# Format namenode
if [ "${FORMAT_NAMENODE}" == "1" ]; then
echo "Formatting namenode"
echo
su -c "echo Y | ${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} namenode -format" ${HADOOP_HDFS_USER}
echo
fi
# Start namenode process
echo "Starting namenode process"
echo
if [ -e ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh ]; then
DAEMON_PATH=${HADOOP_PREFIX}/sbin
else
DAEMON_PATH=${HADOOP_PREFIX}/bin
fi
su -c "${DAEMON_PATH}/hadoop-daemon.sh --config ${HADOOP_CONF_DIR} start namenode" ${HADOOP_HDFS_USER}
echo
echo "Initialize HDFS file system: "
echo
#create the /user dir
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /user" ${HADOOP_HDFS_USER}
#create /tmp and give it 777
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /tmp" ${HADOOP_HDFS_USER}
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chmod 777 /tmp" ${HADOOP_HDFS_USER}
#create /mapred
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /mapred" ${HADOOP_HDFS_USER}
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chmod 700 /mapred" ${HADOOP_HDFS_USER}
su -c "${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chown ${HADOOP_MAPREDUCE_USER}:system /mapred" ${HADOOP_HDFS_USER}
if [ $? -eq 0 ]; then
echo "Completed."
else
echo "Unknown error occurred, check hadoop logs for details."
fi
echo
echo "Please startup datanode processes: /etc/init.d/hadoop-datanode start"

View File

@ -1,219 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to set up the HDFS file system for a single-node deployment
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
if [ "$HADOOP_HOME" != "" ]; then
echo "Warning: \$HADOOP_HOME is deprecated."
echo
fi
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Optional parameters:
--default Setup system as default
-h Display this message
"
exit 1
}
template_generator() {
REGEX='(\$\{[a-zA-Z_][a-zA-Z_0-9]*\})'
while IFS= read -r line ; do
while [[ "$line" =~ $REGEX ]] ; do
LHS=${BASH_REMATCH[1]}
RHS="$(eval echo "\"$LHS\"")"
line=${line//$LHS/$RHS}
done
echo "$line" >> "$2"
done < "$1"
}
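# Illustrative example: given a template line "export JAVA_HOME=${JAVA_HOME}",
# template_generator appends it to the output file with ${JAVA_HOME} replaced
# by its current value in this shell.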
OPTS=$(getopt \
-n $0 \
-o 'h' \
-l 'default' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
if [ -e /etc/hadoop/hadoop-env.sh ]; then
. /etc/hadoop/hadoop-env.sh
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--default)
AUTOMATED=1; shift
;;
-h)
usage
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
if [ "${AUTOMATED}" != "1" ]; then
echo "Welcome to Hadoop single node setup wizard"
echo
echo -n "Would you like to use default single node configuration? (y/n) "
read SET_CONFIG
echo -n "Would you like to format name node? (y/n) "
read SET_FORMAT
echo -n "Would you like to setup default directory structure? (y/n) "
read SET_MKDIR
echo -n "Would you like to start up Hadoop? (y/n) "
read STARTUP
echo -n "Would you like to start up Hadoop on reboot? (y/n) "
read SET_REBOOT
echo
echo "Review your choices:"
echo
echo "Setup single node configuration : ${SET_CONFIG}"
echo "Format namenode : ${SET_FORMAT}"
echo "Setup default file system structure: ${SET_MKDIR}"
echo "Start up Hadoop : ${STARTUP}"
echo "Start up Hadoop on reboot : ${SET_REBOOT}"
echo
echo -n "Proceed with setup? (y/n) "
read CONFIRM
if [ "${CONFIRM}" != "y" ]; then
echo "User aborted setup, exiting..."
exit 1
fi
else
SET_CONFIG="y"
SET_FORMAT="y"
SET_MKDIR="y"
STARTUP="y"
SET_REBOOT="y"
fi
AUTOMATED=${AUTOMATED:-0}
SET_CONFIG=${SET_CONFIG:-y}
SET_FORMAT=${SET_FORMAT:-n}
SET_MKDIR=${SET_MKDIR:-y}
STARTUP=${STARTUP:-y}
SET_REBOOT=${SET_REBOOT:-y}
# Make sure system is not already started
/etc/init.d/hadoop-namenode stop 2>/dev/null >/dev/null
/etc/init.d/hadoop-datanode stop 2>/dev/null >/dev/null
/etc/init.d/hadoop-jobtracker stop 2>/dev/null >/dev/null
/etc/init.d/hadoop-tasktracker stop 2>/dev/null >/dev/null
if [ "${SET_CONFIG}" == "y" ]; then
JAVA_HOME=${JAVA_HOME:-/usr/java/default}
HADOOP_NN_HOST=${HADOOP_NN_HOST:-localhost}
HADOOP_NN_DIR=${HADOOP_NN_DIR:-/var/lib/hadoop/hdfs/namenode}
HADOOP_DN_DIR=${HADOOP_DN_DIR:-/var/lib/hadoop/hdfs/datanode}
HADOOP_JT_HOST=${HADOOP_JT_HOST:-localhost}
HADOOP_HDFS_DIR=${HADOOP_HDFS_DIR:-/var/lib/hadoop/hdfs}
HADOOP_MAPRED_DIR=${HADOOP_MAPRED_DIR:-/var/lib/hadoop/mapred}
HADOOP_PID_DIR=${HADOOP_PID_DIR:-/var/run/hadoop}
HADOOP_LOG_DIR="/var/log/hadoop"
HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop}
HADOOP_REPLICATION=${HADOOP_REPLICATION:-1}
${HADOOP_PREFIX}/sbin/hadoop-setup-conf.sh --auto \
--hdfs-user=hdfs \
--mapreduce-user=mapred \
--conf-dir=${HADOOP_CONF_DIR} \
--datanode-dir=${HADOOP_DN_DIR} \
--hdfs-dir=${HADOOP_HDFS_DIR} \
--jobtracker-host=${HADOOP_JT_HOST} \
--log-dir=${HADOOP_LOG_DIR} \
--pid-dir=${HADOOP_PID_DIR} \
--mapred-dir=${HADOOP_MAPRED_DIR} \
--namenode-dir=${HADOOP_NN_DIR} \
--namenode-host=${HADOOP_NN_HOST} \
--replication=${HADOOP_REPLICATION}
fi
if [ ! -e ${HADOOP_NN_DIR} ]; then
rm -rf ${HADOOP_HDFS_DIR} 2>/dev/null >/dev/null
mkdir -p ${HADOOP_HDFS_DIR}
chmod 755 ${HADOOP_HDFS_DIR}
chown hdfs:hadoop ${HADOOP_HDFS_DIR}
/etc/init.d/hadoop-namenode format
elif [ "${SET_FORMAT}" == "y" ]; then
rm -rf ${HADOOP_HDFS_DIR} 2>/dev/null >/dev/null
mkdir -p ${HADOOP_HDFS_DIR}
chmod 755 ${HADOOP_HDFS_DIR}
chown hdfs:hadoop ${HADOOP_HDFS_DIR}
rm -rf ${HADOOP_NN_DIR}
/etc/init.d/hadoop-namenode format
fi
/etc/init.d/hadoop-namenode start
/etc/init.d/hadoop-datanode start
su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /user/mapred' hdfs
su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chown mapred:mapred /user/mapred' hdfs
su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /tmp' hdfs
su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chmod 777 /tmp' hdfs
/etc/init.d/hadoop-jobtracker start
/etc/init.d/hadoop-tasktracker start
if [ "${SET_REBOOT}" == "y" ]; then
if [ -e /etc/debian_version ]; then
ln -sf ../init.d/hadoop-namenode /etc/rc2.d/S90hadoop-namenode
ln -sf ../init.d/hadoop-datanode /etc/rc2.d/S91hadoop-datanode
ln -sf ../init.d/hadoop-jobtracker /etc/rc2.d/S92hadoop-jobtracker
ln -sf ../init.d/hadoop-tasktracker /etc/rc2.d/S93hadoop-tasktracker
ln -sf ../init.d/hadoop-namenode /etc/rc6.d/S10hadoop-namenode
ln -sf ../init.d/hadoop-datanode /etc/rc6.d/S11hadoop-datanode
ln -sf ../init.d/hadoop-jobtracker /etc/rc6.d/S12hadoop-jobtracker
ln -sf ../init.d/hadoop-tasktracker /etc/rc6.d/S13hadoop-tasktracker
elif [ -e /etc/redhat-release ]; then
/sbin/chkconfig hadoop-namenode --add
/sbin/chkconfig hadoop-datanode --add
/sbin/chkconfig hadoop-jobtracker --add
/sbin/chkconfig hadoop-tasktracker --add
/sbin/chkconfig hadoop-namenode on
/sbin/chkconfig hadoop-datanode on
/sbin/chkconfig hadoop-jobtracker on
/sbin/chkconfig hadoop-tasktracker on
fi
fi
if [ "${STARTUP}" != "y" ]; then
/etc/init.d/hadoop-namenode stop
/etc/init.d/hadoop-datanode stop
/etc/init.d/hadoop-jobtracker stop
/etc/init.d/hadoop-tasktracker stop
fi
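# A minimal invocation sketch (hypothetical; --default answers every prompt
# with the defaults shown in the wizard above):
#   hadoop-setup-single-node.sh --default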
fi

View File

@ -1,183 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Run the following jobs to validate a hadoop cluster
## teragen
## terasort
## teravalidate
# If they all pass, 0 is returned; otherwise 1.
# The test works for both secure and insecure deploys. If a kerberos-realm
# is passed, we assume that the deploy is secure and run a kinit before
# running the validation jobs.
################################################################################
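# A minimal invocation sketch (hypothetical values; the user, keytab, and
# realm below are illustrative):
#   hadoop-validate-setup.sh --user=hdfs \
#     --user-keytab=/home/hdfs/hdfs.keytab \
#     --kerberos-realm=KERBEROS.EXAMPLE.COM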
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
DEFAULT_LIBEXEC_DIR="$bin"/../libexec
HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
usage() {
echo "
usage: $0 <parameters>
Optional parameters:
-h Display this message
--user=hdfs Set the user to run the test jobs as
--user-keytab=/home/hdfs/hdfs.keytab Set the user keytab
--kerberos-realm=KERBEROS.EXAMPLE.COM Set Kerberos realm
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o 'h' \
-l 'user:' \
-l 'user-keytab:' \
-l 'kerberos-realm:' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--user)
TEST_USER=$2; shift 2
AUTOMATED=1
;;
--user-keytab)
USER_KEYTAB_FILE=$2; shift 2
AUTOMATED=1
;;
--kerberos-realm)
KERBEROS_REALM=$2; shift 2
AUTOMATED=1
;;
-h)
usage
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
#set the hadoop command and the path to the hadoop examples jar
HADOOP_CMD="${HADOOP_PREFIX}/bin/hadoop --config $HADOOP_CONF_DIR"
#find the hadoop examples jar
HADOOP_EXAMPLES_JAR=''
#find under HADOOP_PREFIX (tar ball install)
HADOOP_EXAMPLES_JAR=`find ${HADOOP_PREFIX} -name 'hadoop-mapreduce-examples-*.jar' | head -n1`
#if it's not found, look under /usr/share/hadoop (rpm/deb installs)
if [ "$HADOOP_EXAMPLES_JAR" == '' ]
then
HADOOP_EXAMPLES_JAR=`find /usr/share/hadoop -name 'hadoop-mapreduce-examples-*.jar' | head -n1`
fi
#if it is still empty then don't run the tests
if [ "$HADOOP_EXAMPLES_JAR" == '' ]
then
echo "Did not find hadoop-mapreduce-examples-*.jar under '${HADOOP_PREFIX}' or '/usr/share/hadoop'"
exit 1
fi
# do a kinit if secure
if [ "${KERBEROS_REALM}" != "" ]; then
# Determine the kerberos location based on the Linux distro.
if [ -e /etc/lsb-release ]; then
KERBEROS_BIN=/usr/bin
else
KERBEROS_BIN=/usr/kerberos/bin
fi
kinit_cmd="su -c '${KERBEROS_BIN}/kinit -kt ${USER_KEYTAB_FILE} ${TEST_USER}' ${TEST_USER}"
echo $kinit_cmd
eval $kinit_cmd
if [ $? -ne 0 ]
then
echo "kinit command did not run successfully."
exit 1
fi
fi
#dir where the test data is stored on hdfs, relative to the user's home dir on hdfs.
PARENT_DIR="validate_deploy_`date +%s`"
TERA_GEN_OUTPUT_DIR="${PARENT_DIR}/tera_gen_data"
TERA_SORT_OUTPUT_DIR="${PARENT_DIR}/tera_sort_data"
TERA_VALIDATE_OUTPUT_DIR="${PARENT_DIR}/tera_validate_data"
#tera gen cmd
TERA_GEN_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR teragen 10000 $TERA_GEN_OUTPUT_DIR' $TEST_USER"
#tera sort cmd
TERA_SORT_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR terasort $TERA_GEN_OUTPUT_DIR $TERA_SORT_OUTPUT_DIR' $TEST_USER"
#tera validate cmd
TERA_VALIDATE_CMD="su -c '$HADOOP_CMD jar $HADOOP_EXAMPLES_JAR teravalidate $TERA_SORT_OUTPUT_DIR $TERA_VALIDATE_OUTPUT_DIR' $TEST_USER"
echo "Starting teragen...."
#run tera gen
echo $TERA_GEN_CMD
eval $TERA_GEN_CMD
if [ $? -ne 0 ]; then
echo "tera gen failed."
exit 1
fi
echo "Teragen passed starting terasort...."
#run tera sort
echo $TERA_SORT_CMD
eval $TERA_SORT_CMD
if [ $? -ne 0 ]; then
echo "tera sort failed."
exit 1
fi
echo "Terasort passed starting teravalidate...."
#run tera validate
echo $TERA_VALIDATE_CMD
eval $TERA_VALIDATE_CMD
if [ $? -ne 0 ]; then
echo "tera validate failed."
exit 1
fi
echo "teragen, terasort, teravalidate passed."
echo "Cleaning the data created by tests: $PARENT_DIR"
CLEANUP_CMD="su -c '$HADOOP_CMD dfs -rmr -skipTrash $PARENT_DIR' $TEST_USER"
echo $CLEANUP_CMD
eval $CLEANUP_CMD
exit 0

View File

@ -1,93 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop datanode
#
# chkconfig: 2345 90 10
# description: Hadoop datanode
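# (The chkconfig line above means: enable in runlevels 2,3,4,5 with start
# priority 90 and stop priority 10.)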
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid"
desc="Hadoop datanode daemon"
HADOOP_PREFIX="/usr"
start() {
echo -n $"Starting $desc (hadoop-datanode): "
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
daemon ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start datanode
else
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start datanode
fi
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-datanode
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-datanode): "
if [ -n "$HADOOP_SECURE_DN_USER" ]; then
daemon ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop datanode
else
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop datanode
fi
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-datanode $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-datanode ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL

View File

@ -1,85 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop jobtracker
#
# chkconfig: 2345 90 10
# description: Hadoop jobtracker
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid"
desc="Hadoop jobtracker daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-jobtracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start jobtracker
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-jobtracker
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-jobtracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop jobtracker
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-jobtracker $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-jobtracker ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL

View File

@ -1,99 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop namenode
#
# chkconfig: 2345 90 10
# description: Hadoop namenode
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid"
desc="Hadoop namenode daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-namenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start namenode $1
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-namenode
return $RETVAL
}
upgrade() {
start -upgrade
}
stop() {
echo -n $"Stopping $desc (hadoop-namenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop namenode
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-namenode $PIDFILE
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
restart() {
stop
start
}
condrestart(){
[ -e /var/lock/subsys/hadoop-namenode ] && restart || :
}
format() {
daemon --user hdfs ${HADOOP_PREFIX}/bin/hadoop namenode -format
}
case "$1" in
start)
start
;;
upgrade)
upgrade
;;
format)
format
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart|try-restart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|try-restart|upgrade}"
exit 1
esac
exit $RETVAL

View File

@ -1,85 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop tasktracker
#
# chkconfig: 2345 90 10
# description: Hadoop tasktracker
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid"
desc="Hadoop tasktracker daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-tasktracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start tasktracker
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-tasktracker
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-tasktracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop tasktracker
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-tasktracker $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-tasktracker ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL

View File

@ -1,174 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# RPM Spec file for Hadoop version @version@
#
%define name hadoop-common
%define version @version@
%define release @package.release@
# Installation Locations
%define _prefix @package.prefix@
%define _bin_dir %{_prefix}/bin
%define _conf_dir @package.conf.dir@
%define _lib_dir %{_prefix}/lib
%define _lib64_dir %{_prefix}/lib64
%define _libexec_dir %{_prefix}/libexec
%define _log_dir @package.log.dir@
%define _pid_dir @package.pid.dir@
%define _sbin_dir %{_prefix}/sbin
%define _share_dir %{_prefix}/share
%define _var_dir @package.var.dir@
# Build time settings
%define _build_dir @package.build.dir@
%define _final_name @final.name@
%define debug_package %{nil}
# Disable brp-java-repack-jars for AspectJ
%define __os_install_post \
/usr/lib/rpm/redhat/brp-compress \
%{!?__debug_package:/usr/lib/rpm/redhat/brp-strip %{__strip}} \
/usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} \
/usr/lib/rpm/redhat/brp-strip-comment-note %{__strip} %{__objdump} \
/usr/lib/rpm/brp-python-bytecompile %{nil}
# RPM scans perl files for dependencies, which breaks for non-packaged perl
# libs like thrift, so disable it
%define _use_internal_dependency_generator 0
%ifarch i386
%global hadoop_arch Linux-i386-32
%endif
%ifarch amd64 x86_64
%global hadoop_arch Linux-amd64-64
%endif
%ifarch noarch
%global hadoop_arch ""
%endif
Summary: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing
License: Apache License, Version 2.0
URL: http://hadoop.apache.org/core/
Vendor: Apache Software Foundation
Group: Development/Libraries
Name: %{name}
Version: %{version}
Release: %{release}
Source0: %{_final_name}-bin.tar.gz
Prefix: %{_prefix}
Prefix: %{_conf_dir}
Prefix: %{_log_dir}
Prefix: %{_pid_dir}
Buildroot: %{_build_dir}
Requires: sh-utils, textutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, jdk >= 1.6
AutoReqProv: no
Provides: hadoop
%description
The Apache Hadoop project develops open-source software for reliable, scalable,
distributed computing. Hadoop includes these subprojects:
Hadoop Common: The common utilities that support the other Hadoop subprojects.
%prep
%setup -n %{_final_name}
%build
if [ -d ${RPM_BUILD_DIR}%{_prefix} ]; then
rm -rf ${RPM_BUILD_DIR}%{_prefix}
fi
if [ -d ${RPM_BUILD_DIR}%{_log_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_log_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_conf_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_conf_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_pid_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_pid_dir}
fi
mkdir -p ${RPM_BUILD_DIR}%{_prefix}
mkdir -p ${RPM_BUILD_DIR}%{_bin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_lib_dir}
%ifarch amd64 x86_64
mkdir -p ${RPM_BUILD_DIR}%{_lib64_dir}
%endif
mkdir -p ${RPM_BUILD_DIR}%{_libexec_dir}
mkdir -p ${RPM_BUILD_DIR}%{_log_dir}
mkdir -p ${RPM_BUILD_DIR}%{_conf_dir}
mkdir -p ${RPM_BUILD_DIR}%{_pid_dir}
mkdir -p ${RPM_BUILD_DIR}%{_sbin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_share_dir}
mkdir -p ${RPM_BUILD_DIR}%{_var_dir}
#########################
#### INSTALL SECTION ####
#########################
%install
mv ${RPM_BUILD_DIR}/%{_final_name}/bin/* ${RPM_BUILD_DIR}%{_bin_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/* ${RPM_BUILD_DIR}%{_conf_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/lib/* ${RPM_BUILD_DIR}%{_lib_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/libexec/* ${RPM_BUILD_DIR}%{_libexec_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/sbin/* ${RPM_BUILD_DIR}%{_sbin_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/share/* ${RPM_BUILD_DIR}%{_share_dir}
rm -rf ${RPM_BUILD_DIR}/%{_final_name}/etc
%pre
getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -g 123 -r hadoop
%post
bash ${RPM_INSTALL_PREFIX0}/sbin/update-hadoop-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3}
%preun
bash ${RPM_INSTALL_PREFIX0}/sbin/update-hadoop-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3} \
--uninstall
%files
%defattr(-,root,root)
%attr(0755,root,hadoop) %{_log_dir}
%attr(0775,root,hadoop) %{_pid_dir}
%config(noreplace) %{_conf_dir}/configuration.xsl
%config(noreplace) %{_conf_dir}/core-site.xml
%config(noreplace) %{_conf_dir}/hadoop-env.sh
%config(noreplace) %{_conf_dir}/hadoop-metrics.properties
%config(noreplace) %{_conf_dir}/hadoop-metrics2.properties
%config(noreplace) %{_conf_dir}/hadoop-policy.xml
%config(noreplace) %{_conf_dir}/log4j.properties
%config(noreplace) %{_conf_dir}/masters
%config(noreplace) %{_conf_dir}/slaves
%config(noreplace) %{_conf_dir}/fair-scheduler.xml
%{_conf_dir}/hadoop-env.sh.template
%{_conf_dir}/ssl-client.xml.example
%{_conf_dir}/ssl-server.xml.example
%{_prefix}
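# A build sketch (hypothetical; the @...@ tokens above are substituted by the
# packaging build before rpmbuild runs, and the spec file name is illustrative):
#   rpmbuild -bb hadoop-common.spec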

View File

@ -1,198 +0,0 @@
<?xml version="1.0"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- This is the configuration file for the resource manager in Hadoop. -->
<!-- You can configure various scheduling parameters related to queues. -->
<!-- The properties for a queue follow a naming convention, such as -->
<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
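<!-- For example (illustrative): mapred.capacity-scheduler.queue.default.capacity
     sets the capacity of the queue named "default". -->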
<configuration>
<property>
<name>mapred.capacity-scheduler.maximum-system-jobs</name>
<value>3000</value>
<description>Maximum number of jobs in the system which can be initialized,
concurrently, by the CapacityScheduler.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.capacity</name>
<value>100</value>
<description>Percentage of the number of slots in the cluster that are
to be available for jobs in this queue.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
<value>-1</value>
<description>
maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
The default value of -1 means a queue can use the complete capacity of the cluster.
This property can be used to keep long-running jobs from occupying more than a
certain percentage of the cluster, which, in the absence of pre-emption, could
otherwise erode the capacity guarantees of other queues.
Note that maximum-capacity is a percentage, so the absolute limit scales with the
cluster's capacity: if nodes or racks are added, the queue's maximum capacity in
absolute terms increases accordingly.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.supports-priority</name>
<value>false</value>
<description>If true, priorities of jobs will be taken into
account in scheduling decisions.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
<value>100</value>
<description> Each queue enforces a limit on the percentage of resources
allocated to a user at any given time, if there is competition for them.
This user limit can vary between a minimum and maximum value. The former
depends on the number of users who have submitted jobs, and the latter is
set to this property value. For example, suppose the value of this
property is 25. If two users have submitted jobs to a queue, no single
user can use more than 50% of the queue resources. If a third user submits
a job, no single user can use more than 33% of the queue resources. With 4
or more users, no user can use more than 25% of the queue's resources. A
value of 100 implies no user limits are imposed.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
<value>1</value>
<description>The multiple of the queue capacity which can be configured to
allow a single user to acquire more slots.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
<value>200000</value>
<description>The maximum number of tasks, across all jobs in the queue,
which can be initialized concurrently. Once the queue's jobs exceed this
limit they will be queued on disk.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
<value>100000</value>
<description>The maximum number of tasks per-user, across all the of the
user's jobs in the queue, which can be initialized concurrently. Once the
user's jobs exceed this limit they will be queued on disk.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
<value>10</value>
<description>The multiple of (maximum-system-jobs * queue-capacity) used to
determine the number of jobs which are accepted by the scheduler.
</description>
</property>
<!-- The default configuration settings for the capacity task scheduler -->
<!-- The default values would be applied to all the queues which don't have -->
<!-- the appropriate property for the particular queue -->
<property>
<name>mapred.capacity-scheduler.default-supports-priority</name>
<value>false</value>
<description>If true, priorities of jobs will be taken into
account in scheduling decisions by default in a job queue.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
<value>100</value>
<description>The percentage of the resources limited to a particular user
for the job queue at any given point of time by default.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.default-user-limit-factor</name>
<value>1</value>
<description>The default multiple of queue-capacity which is used to
determine the amount of slots a single user can consume concurrently.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
<value>200000</value>
<description>The default maximum number of tasks, across all jobs in the
queue, which can be initialized concurrently. Once the queue's jobs exceed
this limit they will be queued on disk.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
<value>100000</value>
<description>The default maximum number of tasks per-user, across all the of
the user's jobs in the queue, which can be initialized concurrently. Once
the user's jobs exceed this limit they will be queued on disk.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
<value>10</value>
<description>The default multiple of (maximum-system-jobs * queue-capacity)
used to determine the number of jobs which are accepted by the scheduler.
</description>
</property>
<!-- Capacity scheduler Job Initialization configuration parameters -->
<property>
<name>mapred.capacity-scheduler.init-poll-interval</name>
<value>5000</value>
<description>The amount of time in milliseconds which is used to poll
the job queues for jobs to initialize.
</description>
</property>
<property>
<name>mapred.capacity-scheduler.init-worker-threads</name>
<value>5</value>
<description>Number of worker threads used by the initialization
poller to initialize jobs in a set of queues. If this number equals
the number of job queues, a single thread initializes the jobs in each
queue. If it is smaller, each thread is assigned a set of queues. If it
is larger, the number of threads is capped at the number of job queues.
</description>
</property>
</configuration>

View File

@ -1,25 +0,0 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Logging Implementation
#Log4J
org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
#JDK Logger
#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

View File

@ -1,100 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>local.realm</name>
<value>${KERBEROS_REALM}</value>
</property>
<!-- file system properties -->
<property>
<name>fs.default.name</name>
<value>hdfs://${HADOOP_NN_HOST}:8020</value>
<description>The name of the default file system. Either the
literal string "local" or a host:port for NDFS.
</description>
<final>true</final>
</property>
<property>
<name>fs.trash.interval</name>
<value>360</value>
<description>Number of minutes between trash checkpoints.
If zero, the trash feature is disabled.
</description>
</property>
<property>
<name>hadoop.security.auth_to_local</name>
<value>
RULE:[2:$1@$0]([jt]t@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/
RULE:[2:$1@$0]([nd]n@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/
RULE:[2:$1@$0](mapred@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/
RULE:[2:$1@$0](hdfs@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/
RULE:[2:$1@$0](mapredqa@.*${KERBEROS_REALM})s/.*/${HADOOP_MR_USER}/
RULE:[2:$1@$0](hdfsqa@.*${KERBEROS_REALM})s/.*/${HADOOP_HDFS_USER}/
RULE:[2:$1@$0](hm@.*${KERBEROS_REALM})s/.*/${HBASE_USER}/
RULE:[2:$1@$0](rs@.*${KERBEROS_REALM})s/.*/${HBASE_USER}/
DEFAULT
</value>
<description>Maps Kerberos principals to local OS user names.</description>
</property>
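<!-- Illustrative example: with KERBEROS_REALM=EXAMPLE.COM and
     HADOOP_HDFS_USER=hdfs, the [nd]n rule above maps a principal such as
     nn/nn-host.example.com@EXAMPLE.COM to the local user "hdfs". -->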
<property>
<name>hadoop.security.authentication</name>
<value>${SECURITY_TYPE}</value>
<description>
Set the authentication for the cluster. Valid values are: simple or
kerberos.
</description>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>${SECURITY}</value>
<description>
Enable authorization for different protocols.
</description>
</property>
<property>
<name>hadoop.security.groups.cache.secs</name>
<value>14400</value>
</property>
<property>
<name>hadoop.kerberos.kinit.command</name>
<value>${KINIT}</value>
</property>
<property>
<name>hadoop.http.filter.initializers</name>
<value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
</property>
</configuration>

View File

@ -1,78 +0,0 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set Hadoop-specific environment variables here.
# The only required environment variable is JAVA_HOME. All others are
# optional. When running a distributed configuration it is best to
# set JAVA_HOME in this file, so that it is correctly defined on
# remote nodes.
# The java implementation to use.
export JAVA_HOME=${JAVA_HOME}
# The jsvc implementation to use. Jsvc is required to run secure datanodes.
#export JSVC_HOME=${JSVC_HOME}
export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
# Extra Java CLASSPATH elements. Automatically insert capacity-scheduler.
for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
if [ "$HADOOP_CLASSPATH" ]; then
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
else
export HADOOP_CLASSPATH=$f
fi
done
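# (Illustrative: after this loop, HADOOP_CLASSPATH contains each
# capacity-scheduler jar found above, joined with ':'.)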
# The maximum amount of heap to use, in MB. Default is 1000.
#export HADOOP_HEAPSIZE=
#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
# Extra Java runtime options. Empty by default.
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
# Command specific options appended to HADOOP_OPTS when specified
export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
# The ZKFC does not need a large heap, and keeping it small avoids
# any potential for long GC pauses
export HADOOP_ZKFC_OPTS="-Xmx256m $HADOOP_ZKFC_OPTS"
# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
# On secure datanodes, user to run the datanode as after dropping privileges
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
# Where log files are stored. $HADOOP_HOME/logs by default.
#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
# The directory where pid files are stored. /tmp by default.
export HADOOP_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
# A string representing this instance of hadoop. $USER by default.
export HADOOP_IDENT_STRING=$USER

View File

@ -1,20 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# syntax: [prefix].[source|sink|jmx].[instance].[options]
# See package.html in org.apache.hadoop.metrics2 for details
*.period=60
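# Example sink wiring (commented out; FileSink is the stock file sink shipped
# in hadoop-common, while the instance name "file" and the output filename
# below are illustrative):
#namenode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
#namenode.sink.file.filename=namenode-metrics.out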

View File

@ -1,250 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>security.client.protocol.acl</name>
<value>*</value>
<description>ACL for ClientProtocol, which is used by user code
via the DistributedFileSystem.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
for block recovery.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for DatanodeProtocol, which is used by datanodes to
communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.inter.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for InterDatanodeProtocol, the inter-datanode protocol
for updating generation timestamp.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.namenode.protocol.acl</name>
<value>*</value>
<description>ACL for NamenodeProtocol, the protocol used by the secondary
namenode to communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.inter.tracker.protocol.acl</name>
<value>*</value>
<description>ACL for InterTrackerProtocol, used by the tasktrackers to
communicate with the jobtracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.submission.protocol.acl</name>
<value>*</value>
<description>ACL for JobSubmissionProtocol, used by job clients to
communicate with the jobtracker for job submission, querying job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.task.umbilical.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.operations.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for AdminOperationsProtocol. Used for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.refresh.usertogroups.mappings.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for RefreshUserMappingsProtocol. Used to refresh
users mappings. The ACL is a comma-separated list of user and
group names. The user and group list is separated by a blank. For
e.g. "alice,bob users,wheel". A special value of "*" means all
users are allowed.</description>
</property>
<property>
<name>security.refresh.policy.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for RefreshAuthorizationPolicyProtocol, used by the
dfsadmin and mradmin commands to refresh the security policy in-effect.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<!-- YARN Protocols -->
<property>
<name>security.resourcetracker.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for ResourceTracker protocol, used by the
ResourceManager and NodeManager to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for RMAdminProtocol, for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for ClientRMProtocol, used by the ResourceManager
and applications submission clients to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationmaster.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for AMRMProtocol, used by the ResourceManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.containermanager.protocol.acl</name>
<value>*</value>
<description>ACL for ContainerManager protocol, used by the NodeManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.resourcelocalizer.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceLocalizer protocol, used by the NodeManager
and ResourceLocalizer to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for MRClientProtocol, used by job clients to
communicate with the MR ApplicationMaster to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.ha.service.protocol.acl</name>
<value>*</value>
<description>ACL for HAService protocol used by HAAdmin to manage the
active and stand-by states of namenode.</description>
</property>
<property>
<name>security.zkfc.protocol.acl</name>
<value>*</value>
<description>ACL for access to the ZK Failover Controller
</description>
</property>
<property>
<name>security.mrhs.client.protocol.acl</name>
<value>*</value>
<description>ACL for HSClientProtocol, used by job clients to
communicate with the MR History Server to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.qjournal.service.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for QJournalProtocol, used by the NN to communicate with
JNs when using the QuorumJournalManager for edit logs.</description>
</property>
</configuration>

View File

@ -1,269 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!-- file system properties -->
<property>
<name>dfs.namenode.name.dir</name>
<value>${HADOOP_NN_DIR}</value>
<description>Determines where on the local filesystem the DFS name node
should store the name table. If this is a comma-delimited list
of directories then the name table is replicated in all of the
directories, for redundancy. </description>
<final>true</final>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>${HADOOP_DN_DIR}</value>
<description>Determines where on the local filesystem an DFS data node
should store its blocks. If this is a comma-delimited
list of directories, then data will be stored in all named
directories, typically on different devices.
Directories that do not exist are ignored.
</description>
<final>true</final>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
<value>1.0f</value>
<description>
Specifies the percentage of blocks that should satisfy the minimal
replication requirement defined by dfs.namenode.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
<name>dfs.datanode.address</name>
<value>${HADOOP_DN_ADDR}</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>${HADOOP_DN_HTTP_ADDR}</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>${HADOOP_NN_HOST}:50070</value>
<description>The address and the base port where the dfs namenode
web ui will listen on.
</description>
<final>true</final>
</property>
<!-- Permissions configuration -->
<property>
<name>dfs.umaskmode</name>
<value>077</value>
<description>
The octal umask used when creating files and directories.
</description>
</property>
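<!-- Illustrative: with a umask of 077, group and other permission bits are
     stripped, so new directories default to mode 700 and new files to 600. -->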
<property>
<name>dfs.block.access.token.enable</name>
<value>${SECURITY}</value>
<description>
If true, access tokens are used as capabilities for accessing datanodes.
</description>
</property>
<property>
<name>dfs.namenode.kerberos.principal</name>
<value>nn/_HOST@${local.realm}</value>
<description>
Kerberos principal name for the NameNode
</description>
</property>
<property>
<name>dfs.secondary.namenode.kerberos.principal</name>
<value>nn/_HOST@${local.realm}</value>
<description>
Kerberos principal name for the secondary NameNode.
</description>
</property>
<property>
<name>dfs.namenode.kerberos.https.principal</name>
<value>host/_HOST@${local.realm}</value>
<description>
The Kerberos principal for the host that the NameNode runs on.
</description>
</property>
<property>
<name>dfs.secondary.namenode.kerberos.https.principal</name>
<value>host/_HOST@${local.realm}</value>
<description>
The Kerberos principal for the host that the secondary NameNode runs on.
</description>
</property>
<property>
<name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@${local.realm}</value>
<description>
The Kerberos principal that the DataNode runs as. "_HOST" is replaced by
the real host name.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@${local.realm}</value>
<description>
The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
HTTP SPNEGO specification.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
</description>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
Combined keytab file containing the namenode service and host principals.
</description>
</property>
<property>
<name>dfs.secondary.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
Combined keytab file containing the namenode service and host principals.
</description>
</property>
<property>
<name>dfs.datanode.keytab.file</name>
<value>/etc/security/keytabs/dn.service.keytab</value>
<description>
The filename of the keytab file for the DataNode.
</description>
</property>
<property>
<name>dfs.https.port</name>
<value>50470</value>
<description>The https port where namenode binds</description>
</property>
<property>
<name>dfs.namenode.https-address</name>
<value>${HADOOP_NN_HOST}:50470</value>
<description>The https address where the namenode binds</description>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>${DFS_DATANODE_DIR_PERM}</value>
<description>The permissions that should be set on dfs.datanode.data.dir
directories. The datanode will not come up if the permissions differ
on existing dfs.datanode.data.dir directories. If the directories
don't exist, they will be created with this permission.
</description>
</property>
<property>
<name>dfs.cluster.administrators</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for the users and groups who can view the default servlets in HDFS</description>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>${HADOOP_GROUP}</value>
<description>The name of the group of super-users.</description>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>${HADOOP_SNN_HOST}:50090</value>
<description>
The secondary namenode http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.hosts</name>
<value>${HADOOP_CONF_DIR}/dfs.include</value>
<description>Names a file that contains a list of hosts that are
permitted to connect to the namenode. The full pathname of the file
must be specified. If the value is empty, all hosts are
permitted.</description>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>${HADOOP_CONF_DIR}/dfs.exclude</value>
<description>Names a file that contains a list of hosts that are
not permitted to connect to the namenode. The full pathname of the
file must be specified. If the value is empty, no hosts are
excluded.
</description>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>${DFS_WEBHDFS_ENABLED}</value>
<description>Enable or disable webhdfs. Defaults to false.</description>
</property>
<property>
<name>dfs.support.append</name>
<value>${DFS_SUPPORT_APPEND}</value>
<description>Enable or disable append. Defaults to false.</description>
</property>
</configuration>
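A note on the _HOST token used by the Kerberos principals above (host and realm names hypothetical): on a NameNode whose fqdn is nn1.example.com with local.realm set to EXAMPLE.COM, the configured nn/_HOST@${local.realm} resolves at runtime to nn/nn1.example.com@EXAMPLE.COM. The matching keytab entry can be checked with:

klist -k /etc/security/keytabs/nn.service.keytab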

View File

@ -1,212 +0,0 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log
# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
# Logging Threshold
log4j.threshold=ALL
# Null Appender
log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
#
# Rolling File Appender - cap space usage at 5gb.
#
hadoop.log.maxfilesize=256MB
hadoop.log.maxbackupindex=20
log4j.appender.RFA=org.apache.log4j.RollingFileAppender
log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# Daily Rolling File Appender
#
log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
# Rollover at midnight
log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
# 30-day backup
#log4j.appender.DRFA.MaxBackupIndex=30
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.console=org.apache.log4j.ConsoleAppender
log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
#
# TaskLog Appender
#
#Default values
hadoop.tasklog.taskid=null
hadoop.tasklog.iscleanup=false
hadoop.tasklog.noKeepSplits=4
hadoop.tasklog.totalLogFileSize=100
hadoop.tasklog.purgeLogSplits=true
hadoop.tasklog.logsRetainHours=12
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth-${user.name}.audit
log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
#
# Daily Rolling Security appender
#
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
# Custom Logging levels
#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
# Jets3t library
log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
#
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender
#
# Use following logger to send summary to separate file defined by
# hadoop.mapreduce.jobsummary.log.file :
# hadoop.mapreduce.jobsummary.logger=INFO,JSA
#
hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
hadoop.mapreduce.jobsummary.log.maxbackupindex=20
log4j.appender.JSA=org.apache.log4j.RollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
#
# Yarn ResourceManager Application Summary Log
#
# Set the ResourceManager summary log filename
#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
# Set the ResourceManager summary log level and appender
#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
# Appender for ResourceManager Application Summary Log
# Requires the following properties to be set
# - hadoop.log.dir (Hadoop Log directory)
# - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
# - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
#log4j.appender.RMSUMMARY.MaxFileSize=256MB
#log4j.appender.RMSUMMARY.MaxBackupIndex=20
#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
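A minimal sketch of overriding these defaults per daemon run, assuming the stock hadoop-env.sh hooks of this era (neither variable is defined in this file): raise the root logger to console DEBUG, and route HDFS audit events to the RFAAUDIT appender defined above instead of the NullAppender.

export HADOOP_ROOT_LOGGER="DEBUG,console"
export HADOOP_NAMENODE_OPTS="-Dhdfs.audit.logger=INFO,RFAAUDIT ${HADOOP_NAMENODE_OPTS}"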

View File

@ -1,33 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<property>
<name>mapred.queue.default.acl-submit-job</name>
<value>*</value>
</property>
<property>
<name>mapred.queue.default.acl-administer-jobs</name>
<value>*</value>
</property>
</configuration>
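The * value grants everyone access. As a hedged example of tightening the submit ACL, the value format is a comma-separated user list, a space, then a comma-separated group list, so "alice,bob mrusers" would limit submission to alice, bob, and members of group mrusers (names hypothetical). The effective ACLs for the current user can be listed with:

hadoop queue -showacls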

View File

@ -1,308 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
<value>250</value>
<description>Normally, this is the amount of time before killing
processes; the recommended default is 5 seconds, i.e. a value of
5000 here. In this case we use it solely to SIGKILL tasks very
quickly (1/4 second) after asking them to stop, so that we do not
leave VMs around for later jobs.
</description>
</property>
<property>
<name>mapred.system.dir</name>
<value>/mapred/mapredsystem</value>
<final>true</final>
</property>
<property>
<name>mapred.job.tracker</name>
<value>${HADOOP_JT_HOST}:9000</value>
<final>true</final>
</property>
<property>
<name>mapred.job.tracker.http.address</name>
<value>${HADOOP_JT_HOST}:50030</value>
<final>true</final>
</property>
<property>
<name>mapred.local.dir</name>
<value>${HADOOP_MAPRED_DIR}</value>
<final>true</final>
</property>
<property>
<name>mapreduce.cluster.administrators</name>
<value>${HADOOP_MR_USER}</value>
</property>
<property>
<name>mapred.map.tasks.speculative.execution</name>
<value>false</value>
<description>If true, then multiple instances of some map tasks
may be executed in parallel.</description>
</property>
<property>
<name>mapred.reduce.tasks.speculative.execution</name>
<value>false</value>
<description>If true, then multiple instances of some reduce tasks
may be executed in parallel.</description>
</property>
<property>
<name>mapred.output.compression.type</name>
<value>BLOCK</value>
<description>If the job outputs are to be compressed as SequenceFiles, how
should they be compressed? Should be one of NONE, RECORD or BLOCK.
</description>
</property>
<property>
<name>jetty.connector</name>
<value>org.mortbay.jetty.nio.SelectChannelConnector</value>
</property>
<property>
<name>mapred.task.tracker.task-controller</name>
<value>${TASK_CONTROLLER}</value>
</property>
<property>
<name>mapred.child.root.logger</name>
<value>INFO,TLA</value>
</property>
<property>
<name>mapred.child.java.opts</name>
<value>-server -Xmx640m -Djava.net.preferIPv4Stack=true</value>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.active</name>
<value>true</value>
<description>Indicates if persistency of job status information is
active or not.
</description>
</property>
<property>
<name>mapred.job.tracker.persist.jobstatus.dir</name>
<value>file:///${HADOOP_LOG_DIR}/${HADOOP_MR_USER}/jobstatus</value>
<description>The directory where the job status information is persisted
in a file system, to be available after it drops off the memory queue and
between jobtracker restarts.
</description>
</property>
<property>
<name>mapred.job.tracker.history.completed.location</name>
<value>/mapred/history/done</value>
</property>
<property>
<name>mapred.heartbeats.in.second</name>
<value>200</value>
<description>to enable HADOOP-5784</description>
</property>
<property>
<name>mapreduce.tasktracker.outofband.heartbeat</name>
<value>true</value>
<description>to enable MAPREDUCE-270</description>
</property>
<property>
<name>mapred.jobtracker.maxtasks.per.job</name>
<value>200000</value>
<final>true</final>
<description>The maximum number of tasks for a single job.
A value of -1 indicates that there is no maximum.
</description>
</property>
<property>
<name>mapreduce.jobtracker.kerberos.principal</name>
<value>jt/_HOST@${local.realm}</value>
<description>
JT principal
</description>
</property>
<property>
<name>mapreduce.tasktracker.kerberos.principal</name>
<value>tt/_HOST@${local.realm}</value>
<description>
TT principal.
</description>
</property>
<property>
<name>hadoop.job.history.user.location</name>
<value>none</value>
</property>
<property>
<name>mapreduce.jobtracker.keytab.file</name>
<value>/etc/security/keytabs/jt.service.keytab</value>
<description>
The keytab for the jobtracker principal.
</description>
</property>
<property>
<name>mapreduce.tasktracker.keytab.file</name>
<value>/etc/security/keytabs/tt.service.keytab</value>
<description>The filename of the keytab for the task tracker</description>
</property>
<property>
<name>mapreduce.jobtracker.staging.root.dir</name>
<value>/user</value>
<description>The path prefix for where the staging directories should be
placed. The next level is always the user's
name. It is a path in the default file system.
</description>
</property>
<property>
<name>mapreduce.job.acl-modify-job</name>
<value></value>
</property>
<property>
<name>mapreduce.job.acl-view-job</name>
<value>Dr.Who</value>
</property>
<property>
<name>mapreduce.tasktracker.group</name>
<value>${HADOOP_GROUP}</value>
<description>The group used for accessing the task controller binary.
The mapred user must be a member; other users should *not*
be members.
</description>
</property>
<property>
<name>mapred.acls.enabled</name>
<value>true</value>
</property>
<property>
<name>mapred.jobtracker.taskScheduler</name>
<value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
</property>
<property>
<name>mapred.queue.names</name>
<value>default</value>
</property>
<!-- settings for the history server -->
<property>
<name>mapreduce.history.server.embedded</name>
<value>false</value>
</property>
<property>
<name>mapreduce.history.server.http.address</name>
<value>${HADOOP_JT_HOST}:51111</value>
</property>
<property>
<name>mapreduce.jobhistory.kerberos.principal</name>
<value>jt/_HOST@${local.realm}</value>
<description>history server principal</description>
</property>
<property>
<name>mapreduce.jobhistory.keytab.file</name>
<value>/etc/security/keytabs/jt.service.keytab</value>
<description>
The keytab for the job history server principal.
</description>
</property>
<property>
<name>mapred.hosts</name>
<value>${HADOOP_CONF_DIR}/mapred.include</value>
<description>Names a file that contains the list of nodes that may
connect to the jobtracker. If the value is empty, all hosts are
permitted.</description>
</property>
<property>
<name>mapred.hosts.exclude</name>
<value>${HADOOP_CONF_DIR}/mapred.exclude</value>
<description>Names a file that contains the list of hosts that
should be excluded by the jobtracker. If the value is empty, no
hosts are excluded.</description>
</property>
<property>
<name>mapred.jobtracker.retirejob.check</name>
<value>10000</value>
</property>
<property>
<name>mapred.jobtracker.retirejob.interval</name>
<value>0</value>
</property>
<property>
<name>mapreduce.cluster.mapmemory.mb</name>
<value>${MAPREDUCE_CLUSTER_MAPMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.cluster.reducememory.mb</name>
<value>${MAPREDUCE_CLUSTER_REDUCEMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.jobtracker.maxmapmemory.mb</name>
<value>${MAPREDUCE_JOBTRACKER_MAXMAPMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.jobtracker.maxreducememory.mb</name>
<value>${MAPREDUCE_JOBTRACKER_MAXREDUCEMEMORY_MB}</value>
</property>
<property>
<name>mapreduce.map.memory.mb</name>
<value>${MAPREDUCE_MAP_MEMORY_MB}</value>
</property>
<property>
<name>mapreduce.reduce.memory.mb</name>
<value>${MAPREDUCE_REDUCE_MEMORY_MB}</value>
</property>
</configuration>
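A short sketch of the include/exclude mechanism named by mapred.hosts and mapred.hosts.exclude above (hostnames hypothetical; the files hold one host per line):

printf 'worker-01.example.com\nworker-02.example.com\n' > ${HADOOP_CONF_DIR}/mapred.include
hadoop mradmin -refreshNodes   # re-read the lists without restarting the JobTracker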

View File

@ -1,21 +0,0 @@
# Copyright 2011 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mapreduce.cluster.local.dir=${HADOOP_MAPRED_DIR}
mapreduce.tasktracker.group=${HADOOP_GROUP}
hadoop.log.dir=${HADOOP_LOG_DIR}/${HADOOP_MR_USER}

View File

@ -1,172 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script configures hadoop-env.sh and symlinks directories for
# relocating RPM locations.
usage() {
echo "
usage: $0 <parameters>
Required parameters:
--prefix=PREFIX path to install into
Optional parameters:
--arch=i386 OS Architecture
--bin-dir=PREFIX/bin Executable directory
--conf-dir=/etc/hadoop Configuration directory
--log-dir=/var/log/hadoop Log directory
--pid-dir=/var/run PID file location
--sbin-dir=PREFIX/sbin System executable directory
"
exit 1
}
template_generator() {
REGEX='(\$\{[a-zA-Z_][a-zA-Z_0-9]*\})'
cat $1 |
while read line ; do
while [[ "$line" =~ $REGEX ]] ; do
LHS=${BASH_REMATCH[1]}
RHS="$(eval echo "\"$LHS\"")"
line=${line//$LHS/$RHS}
done
echo "$line" >> "$2"
done
}
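# Example with hypothetical values: with HADOOP_LOG_DIR=/var/log/hadoop
# exported, template_generator rewrites a template line such as
#   export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}
# into the output file as
#   export HADOOP_LOG_DIR=/var/log/hadoop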
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'arch:' \
-l 'prefix:' \
-l 'bin-dir:' \
-l 'conf-dir:' \
-l 'lib-dir:' \
-l 'log-dir:' \
-l 'pid-dir:' \
-l 'sbin-dir:' \
-l 'uninstall' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--arch)
ARCH=$2 ; shift 2
;;
--prefix)
PREFIX=$2 ; shift 2
;;
--bin-dir)
BIN_DIR=$2 ; shift 2
;;
--log-dir)
LOG_DIR=$2 ; shift 2
;;
--lib-dir)
LIB_DIR=$2 ; shift 2
;;
--conf-dir)
CONF_DIR=$2 ; shift 2
;;
--pid-dir)
PID_DIR=$2 ; shift 2
;;
--sbin-dir)
SBIN_DIR=$2 ; shift 2
;;
--uninstall)
UNINSTALL=1; shift
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
for var in PREFIX; do
if [ -z "$(eval "echo \$$var")" ]; then
echo Missing param: $var
usage
fi
done
ARCH=${ARCH:-i386}
HADOOP_PREFIX=$PREFIX
HADOOP_BIN_DIR=${BIN_DIR:-$PREFIX/bin}
HADOOP_CONF_DIR=${CONF_DIR:-$PREFIX/etc/hadoop}
HADOOP_LIB_DIR=${LIB_DIR:-$PREFIX/lib}
HADOOP_LOG_DIR=${LOG_DIR:-$PREFIX/var/log}
HADOOP_PID_DIR=${PID_DIR:-$PREFIX/var/run}
HADOOP_SBIN_DIR=${SBIN_DIR:-$PREFIX/sbin}
UNINSTALL=${UNINSTALL:-0}
if [ "${ARCH}" != "i386" ]; then
HADOOP_LIB_DIR=${HADOOP_LIB_DIR}64
fi
if [ "${UNINSTALL}" -eq "1" ]; then
# Remove symlinks
if [ "${HADOOP_CONF_DIR}" != "${HADOOP_PREFIX}/etc/hadoop" ]; then
rm -rf ${HADOOP_PREFIX}/etc/hadoop
fi
rm -f /etc/default/hadoop-env.sh
if [ -d /etc/profile.d ]; then
rm -f /etc/profile.d/hadoop-env.sh
fi
else
# Create symlinks
if [ "${HADOOP_CONF_DIR}" != "${HADOOP_PREFIX}/etc/hadoop" ]; then
mkdir -p ${HADOOP_PREFIX}/etc
ln -sf ${HADOOP_CONF_DIR} ${HADOOP_PREFIX}/etc/hadoop
fi
ln -sf ${HADOOP_CONF_DIR}/hadoop-env.sh /etc/default/hadoop-env.sh
if [ -d /etc/profile.d ]; then
ln -sf ${HADOOP_CONF_DIR}/hadoop-env.sh /etc/profile.d/hadoop-env.sh
fi
mkdir -p ${HADOOP_LOG_DIR}
chown root:hadoop ${HADOOP_LOG_DIR}
chmod 775 ${HADOOP_LOG_DIR}
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 775 ${HADOOP_PID_DIR}
fi
TFILE="/tmp/$(basename $0).$$.tmp"
if [ -z "${JAVA_HOME}" ]; then
if [ -e /etc/debian_version ]; then
JAVA_HOME=`update-alternatives --config java | grep java | cut -f2 -d':' | cut -f2 -d' ' | sed -e 's/\/bin\/java//'`
else
JAVA_HOME=/usr/java/default
fi
fi
template_generator ${HADOOP_CONF_DIR}/hadoop-env.sh.template $TFILE
cp ${TFILE} ${HADOOP_CONF_DIR}/hadoop-env.sh
rm -f ${TFILE}
fi

View File

@ -1,15 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/etc/hadoop

View File

@ -1,24 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Package: hadoop-hdfs
Version: @version@
Section: misc
Priority: optional
Provides: hadoop-hdfs
Architecture: all
Depends: openjdk-6-jre-headless, hadoop-common
Maintainer: Apache Software Foundation <general@hadoop.apache.org>
Description: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.
Distribution: development

View File

@ -1,24 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-hdfs-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop

View File

@ -1,19 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/usr/sbin/groupdel hadoop > /dev/null 2>&1
exit 0

View File

@ -1,18 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/usr/sbin/useradd --comment "Hadoop HDFS" -u 201 --shell /bin/bash -M -r --groups hadoop --home /var/lib/hadoop/hdfs hdfs 2> /dev/null || :

View File

@ -1,25 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-hdfs-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop \
--uninstall

View File

@ -1,142 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-datanode
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Data Node server
### END INIT INFO
set -e
# /etc/init.d/hadoop-datanode: start and stop the Apache Hadoop Data Node daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-datanode_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-datanode_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Data Node server not in use (/etc/hadoop/hadoop-datanode_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Data Node server" "hadoop-datanode"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Data Node server" "hadoop-datanode"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Data Node server" "hadoop-datanode"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start datanode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid ${JAVA_HOME}/bin/java hadoop-datanode && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-datanode {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0

View File

@ -1,154 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-namenode
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Name Node server
### END INIT INFO
set -e
# /etc/init.d/hadoop-namenode: start and stop the Apache Hadoop Name Node daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-namenode_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-namenode_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Name Node server not in use (/etc/hadoop/hadoop-namenode_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
format() {
su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} namenode -format' hdfs
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Name Node server" "hadoop-namenode"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Name Node server" "hadoop-namenode"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
format)
log_daemon_msg "Formatting Apache Hadoop Name Node" "hadoop-namenode"
format
if [ $? -eq 0 ]; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Name Node server" "hadoop-namenode"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start namenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid ${JAVA_HOME}/bin/java hadoop-namenode && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-namenode {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0
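A minimal first-boot sequence against the script above (Debian layout assumed; the format action must run once, before the first start):

/etc/init.d/hadoop-namenode format
/etc/init.d/hadoop-namenode start
/etc/init.d/hadoop-namenode status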

View File

@ -1,142 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-secondarynamenode
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Secondary Name Node server
### END INIT INFO
set -e
# /etc/init.d/hadoop-secondarynamenode: start and stop the Apache Hadoop Secondary Name Node daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-secondarynamenode_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-secondarynamenode_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Name Node server not in use (/etc/hadoop/hadoop-secondarynamenode_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Secondary Name Node server" "hadoop-secondarynamenode"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start secondarynamenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Secondary Name Node server" "hadoop-secondarynamenode"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Secondary Name Node server" "hadoop-secondarynamenode"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start secondarynamenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Secondary Name Node server" "hadoop-secondarynamenode"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid -c hdfs -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start secondarynamenode; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid ${JAVA_HOME}/bin/java hadoop-secondarynamenode && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-secondarynamenode {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0

View File

@ -1,84 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop datanode
#
# chkconfig: 2345 90 10
# description: Hadoop datanode
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-datanode.pid"
desc="Hadoop datanode daemon"
start() {
echo -n $"Starting $desc (hadoop-datanode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start datanode
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-datanode
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-datanode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop datanode
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-datanode $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-datanode ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL
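On the RedHat side, the chkconfig header above (runlevels 2345, start priority 90, stop 10) lets the script be registered the usual way; a sketch:

chkconfig --add hadoop-datanode
service hadoop-datanode start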

View File

@ -1,98 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop namenode
#
# chkconfig: 2345 90 10
# description: Hadoop namenode
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-namenode.pid"
desc="Hadoop namenode daemon"
start() {
echo -n $"Starting $desc (hadoop-namenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start namenode $1
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-namenode
return $RETVAL
}
upgrade() {
start -upgrade
}
stop() {
echo -n $"Stopping $desc (hadoop-namenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop namenode
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-namenode $PIDFILE
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
restart() {
stop
start
}
condrestart(){
[ -e /var/lock/subsys/hadoop-namenode ] && restart || :
}
format() {
daemon --user hdfs ${HADOOP_PREFIX}/bin/hdfs namenode -format
}
case "$1" in
start)
start
;;
upgrade)
upgrade
;;
format)
format
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart|try-restart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|try-restart|upgrade}"
exit 1
esac
exit $RETVAL

View File

@ -1,92 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop secondarynamenode
#
# chkconfig: 2345 90 10
# description: Hadoop secondarynamenode
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-hdfs-secondarynamenode.pid"
desc="Hadoop secondary namenode daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-secondarynamenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start secondarynamenode $1
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-secondarynamenode
return $RETVAL
}
upgrade() {
start -upgrade
}
stop() {
echo -n $"Stopping $desc (hadoop-secondarynamenode): "
daemon --user hdfs ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop secondarynamenode
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-secondarynamenode $PIDFILE
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
restart() {
stop
start
}
condrestart(){
[ -e /var/lock/subsys/hadoop-secondarynamenode ] && restart || :
}
case "$1" in
start)
start
;;
upgrade)
upgrade
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart|try-restart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|try-restart|upgrade}"
exit 1
esac
exit $RETVAL

View File

@ -1,176 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# RPM Spec file for Hadoop version @version@
#
%define name hadoop-hdfs
%define version @version@
%define release @package.release@
%define major_version %(echo %{version} | cut -d. -f -2)
# Installation Locations
%define _prefix @package.prefix@
%define _bin_dir %{_prefix}/bin
%define _conf_dir @package.conf.dir@
%define _lib_dir %{_prefix}/lib
%define _lib64_dir %{_prefix}/lib64
%define _libexec_dir %{_prefix}/libexec
%define _log_dir @package.log.dir@
%define _pid_dir @package.pid.dir@
%define _sbin_dir %{_prefix}/sbin
%define _share_dir %{_prefix}/share
%define _var_dir @package.var.dir@
# Build time settings
%define _build_dir @package.build.dir@
%define _final_name @final.name@
%define debug_package %{nil}
# Disable brp-java-repack-jars for aspect J
%define __os_install_post \
/usr/lib/rpm/redhat/brp-compress \
%{!?__debug_package:/usr/lib/rpm/redhat/brp-strip %{__strip}} \
/usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} \
/usr/lib/rpm/redhat/brp-strip-comment-note %{__strip} %{__objdump} \
/usr/lib/rpm/brp-python-bytecompile %{nil}
# RPM searches perl files for dependencies and this breaks for non-packaged
# perl libs like thrift, so disable this
%define _use_internal_dependency_generator 0
%ifarch i386
%global hadoop_arch Linux-i386-32
%endif
%ifarch amd64 x86_64
%global hadoop_arch Linux-amd64-64
%endif
%ifarch noarch
%global hadoop_arch ""
%endif
Summary: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing
License: Apache License, Version 2.0
URL: http://hadoop.apache.org/core/
Vendor: Apache Software Foundation
Group: Development/Libraries
Name: %{name}
Version: %{version}
Release: %{release}
Source0: %{_final_name}-bin.tar.gz
Prefix: %{_prefix}
Prefix: %{_conf_dir}
Prefix: %{_log_dir}
Prefix: %{_pid_dir}
Buildroot: %{_build_dir}
Requires: sh-utils, textutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, hadoop-common >= %{major_version}.0, hadoop-common <= %{major_version}.9999
AutoReqProv: no
Provides: hadoop-hdfs
%description
The Apache Hadoop project develops open-source software for reliable, scalable,
distributed computing. Hadoop includes these subprojects:
HDFS: A distributed file system that provides high throughput access to application data.
%prep
%setup -n %{_final_name}
%build
if [ -d ${RPM_BUILD_DIR}%{_prefix} ]; then
rm -rf ${RPM_BUILD_DIR}%{_prefix}
fi
if [ -d ${RPM_BUILD_DIR}%{_log_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_log_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_conf_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_conf_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_pid_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_pid_dir}
fi
mkdir -p ${RPM_BUILD_DIR}%{_prefix}
mkdir -p ${RPM_BUILD_DIR}%{_bin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_lib_dir}
%ifarch amd64 x86_64
mkdir -p ${RPM_BUILD_DIR}%{_lib64_dir}
%endif
mkdir -p ${RPM_BUILD_DIR}%{_libexec_dir}
mkdir -p ${RPM_BUILD_DIR}%{_log_dir}
mkdir -p ${RPM_BUILD_DIR}%{_conf_dir}
mkdir -p ${RPM_BUILD_DIR}%{_pid_dir}
mkdir -p ${RPM_BUILD_DIR}%{_sbin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_share_dir}
mkdir -p ${RPM_BUILD_DIR}%{_var_dir}
mkdir -p ${RPM_BUILD_DIR}/etc/init.d
cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-namenode.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-namenode
cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-datanode.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-datanode
rm -f ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-namenode.*
rm -f ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-datanode.*
chmod 0755 ${RPM_BUILD_DIR}/etc/init.d/hadoop-*
#########################
#### INSTALL SECTION ####
#########################
%install
mv ${RPM_BUILD_DIR}/%{_final_name}/bin/* ${RPM_BUILD_DIR}%{_bin_dir}
rm ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/configuration.xsl
rm ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/hadoop-metrics2.properties
mv ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/* ${RPM_BUILD_DIR}%{_conf_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/lib/* ${RPM_BUILD_DIR}%{_lib_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/libexec/* ${RPM_BUILD_DIR}%{_libexec_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/sbin/* ${RPM_BUILD_DIR}%{_sbin_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/share/* ${RPM_BUILD_DIR}%{_share_dir}
rm -rf ${RPM_BUILD_DIR}/%{_final_name}/etc
%pre
getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -g 123 -r hadoop
/usr/sbin/useradd --comment "Hadoop HDFS" -u 201 --shell /bin/bash -M -r --groups hadoop --home %{_var_dir}/hdfs hdfs 2> /dev/null || :
%post
bash ${RPM_INSTALL_PREFIX0}/sbin/update-hdfs-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3}
%preun
bash ${RPM_INSTALL_PREFIX0}/sbin/update-hdfs-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3} \
--uninstall
%files
%defattr(-,root,root)
%attr(0755,root,hadoop) %{_log_dir}
%attr(0775,root,hadoop) %{_pid_dir}
%config(noreplace) %{_conf_dir}/hdfs-site.xml
%{_prefix}
%attr(0775,root,root) /etc/init.d/hadoop-namenode
%attr(0775,root,root) /etc/init.d/hadoop-datanode
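A hedged install sketch: a plain install keeps the paths baked into the spec's four Prefix: tags, which rpm passes to the %post and %preun scriptlets as RPM_INSTALL_PREFIX0 (prefix) through RPM_INSTALL_PREFIX3 (pid dir); any tagged path can be moved at install time with --relocate.

rpm -ivh hadoop-hdfs-*.rpm
rpm -ivh --relocate /var/log/hadoop=/data/logs/hadoop hadoop-hdfs-*.rpm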

View File

@ -1,45 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>${HADOOP_REPLICATION}</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>${HADOOP_NN_DIR}</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>${HADOOP_DN_DIR}</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp</value>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
<value>1.0f</value>
</property>
<property>
<name>dfs.namenode.safemode.extension</name>
<value>3</value>
</property>
</configuration>
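The ${HADOOP_REPLICATION}, ${HADOOP_NN_DIR} and ${HADOOP_DN_DIR} tokens are not resolved by Hadoop at runtime; they are placeholders filled in from the environment when the configuration is generated, presumably by the packaging's template machinery (compare the template_generator function in update-mapred-env.sh further below). For example, with HADOOP_REPLICATION=3 (a hypothetical value) the first property would be emitted as:

    <property>
      <name>dfs.replication</name>
      <value>3</value>
    </property>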

View File

@ -1,128 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script configures hdfs-env.sh and symlinks directories to support
# relocated RPM installs.
usage() {
echo "
usage: $0 <parameters>
Required parameters:
--prefix=PREFIX path to install into
Optional parameters:
--arch=i386 OS Architecture
--bin-dir=PREFIX/bin Executable directory
--conf-dir=/etc/hadoop Configuration directory
--log-dir=/var/log/hadoop Log directory
--pid-dir=/var/run PID file location
--sbin-dir=PREFIX/sbin System executable directory
"
exit 1
}
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'arch:' \
-l 'prefix:' \
-l 'bin-dir:' \
-l 'conf-dir:' \
-l 'lib-dir:' \
-l 'log-dir:' \
-l 'pid-dir:' \
-l 'sbin-dir:' \
-l 'uninstall' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--arch)
ARCH=$2 ; shift 2
;;
--prefix)
PREFIX=$2 ; shift 2
;;
--bin-dir)
BIN_DIR=$2 ; shift 2
;;
--log-dir)
LOG_DIR=$2 ; shift 2
;;
--lib-dir)
LIB_DIR=$2 ; shift 2
;;
--conf-dir)
CONF_DIR=$2 ; shift 2
;;
--pid-dir)
PID_DIR=$2 ; shift 2
;;
--sbin-dir)
SBIN_DIR=$2 ; shift 2
;;
--uninstall)
UNINSTALL=1; shift
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
for var in PREFIX; do
if [ -z "$(eval "echo \$$var")" ]; then
echo Missing param: $var
usage
fi
done
ARCH=${ARCH:-i386}
BIN_DIR=${BIN_DIR:-$PREFIX/bin}
CONF_DIR=${CONF_DIR:-$PREFIX/etc/hadoop}
LIB_DIR=${LIB_DIR:-$PREFIX/lib}
LOG_DIR=${LOG_DIR:-$PREFIX/var/log}
PID_DIR=${PID_DIR:-$PREFIX/var/run}
SBIN_DIR=${SBIN_DIR:-$PREFIX/sbin}
UNINSTALL=${UNINSTALL:-0}
if [ "${ARCH}" != "i386" ]; then
LIB_DIR=${LIB_DIR}64
fi
if [ "${UNINSTALL}" -ne "1" ]; then
mkdir -p ${LOG_DIR}
chown hdfs:hadoop ${LOG_DIR}
chmod 755 ${LOG_DIR}
if [ ! -d ${PID_DIR} ]; then
mkdir -p ${PID_DIR}
chown root:hadoop ${PID_DIR}
chmod 775 ${PID_DIR}
fi
fi
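To illustrate the defaulting and architecture logic above with a hypothetical invocation: calling the script with only --prefix=/usr --arch=x86_64 yields

    BIN_DIR=/usr/bin
    CONF_DIR=/usr/etc/hadoop
    LIB_DIR=/usr/lib64
    LOG_DIR=/usr/var/log
    PID_DIR=/usr/var/run
    SBIN_DIR=/usr/sbin

because every architecture other than i386 has the 64 suffix appended to LIB_DIR.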

View File

@ -1,15 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/etc/hadoop

View File

@ -1,24 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Package: hadoop-mapreduce
Version: @version@
Section: misc
Priority: optional
Provides: hadoop-mapreduce
Architecture: all
Depends: openjdk-6-jre-headless, hadoop-common, hadoop-hdfs
Maintainer: Apache Software Foundation <general@hadoop.apache.org>
Description: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing.
Distribution: development
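Once the .deb is assembled, the control metadata above can be sanity-checked with dpkg-deb (the filename here is hypothetical):

    dpkg-deb --info hadoop-mapreduce_0.23.0_all.deb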

View File

@ -1,24 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-mapred-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop

View File

@ -1,19 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/usr/sbin/groupdel hadoop 2>/dev/null >/dev/null
exit 0

View File

@ -1,18 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
/usr/sbin/useradd --comment "Hadoop MapReduce" -u 202 --shell /bin/bash -M -r --groups hadoop --home /var/lib/hadoop/mapred mapred 2> /dev/null || :

View File

@ -1,25 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bash /usr/sbin/update-mapred-env.sh \
--prefix=/usr \
--bin-dir=/usr/bin \
--sbin-dir=/usr/sbin \
--conf-dir=/etc/hadoop \
--log-dir=/var/log/hadoop \
--pid-dir=/var/run/hadoop \
--uninstall

View File

@ -1,143 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-historyserver
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Job Tracker server
### END INIT INFO
set -e
# /etc/init.d/hadoop-historyserver: start and stop the Apache Hadoop Job History daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-historyserver_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-historyserver_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Job History server not in use (/etc/hadoop/hadoop-historyserver_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Job History server" "hadoop-historyserver"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start historyserver; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Job History server" "hadoop-historyserver"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job History server" "hadoop-historyserver"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start historyserver; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job History server" "hadoop-historyserver"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start historyserver; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid ${JAVA_HOME}/bin/java hadoop-historyserver && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-historyserver {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0
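Day-to-day administration goes through the usual init interface; a sketch, assuming the script is installed as /etc/init.d/hadoop-historyserver:

    sudo service hadoop-historyserver start
    sudo service hadoop-historyserver status
    # Park the daemon without uninstalling it; check_for_no_start honors this marker.
    sudo touch /etc/hadoop/hadoop-historyserver_not_to_be_run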

View File

@ -1,142 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-jobtracker
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Job Tracker server
### END INIT INFO
set -e
# /etc/init.d/hadoop-jobtracker: start and stop the Apache Hadoop Job Tracker daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-jobtracker_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-jobtracker_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Job Tracker server not in use (/etc/hadoop/hadoop-jobtracker_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Job Tracker server" "hadoop-jobtracker"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Job Tracker server" "hadoop-jobtracker"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start jobtracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid ${JAVA_HOME}/bin/java hadoop-jobtracker && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-jobtracker {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0

View File

@ -1,142 +0,0 @@
#! /bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: hadoop-tasktracker
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Apache Hadoop Task Tracker server
### END INIT INFO
set -e
# /etc/init.d/hadoop-tasktracker: start and stop the Apache Hadoop Task Tracker daemon
test -x /usr/bin/hadoop || exit 0
( /usr/bin/hadoop 2>&1 | grep -q hadoop ) 2>/dev/null || exit 0
umask 022
if test -f /etc/default/hadoop-env.sh; then
. /etc/default/hadoop-env.sh
fi
. /lib/lsb/init-functions
# Are we running from init?
run_by_init() {
([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ]
}
check_for_no_start() {
# forget it if we're trying to start, and /etc/hadoop/hadoop-tasktracker_not_to_be_run exists
if [ -e /etc/hadoop/hadoop-tasktracker_not_to_be_run ]; then
if [ "$1" = log_end_msg ]; then
log_end_msg 0
fi
if ! run_by_init; then
log_action_msg "Apache Hadoop Task Tracker server not in use (/etc/hadoop/hadoop-tasktracker_not_to_be_run)"
fi
exit 0
fi
}
check_privsep_dir() {
# Create the PrivSep empty dir if necessary
if [ ! -d ${HADOOP_PID_DIR} ]; then
mkdir -p ${HADOOP_PID_DIR}
chown root:hadoop ${HADOOP_PID_DIR}
chmod 0775 ${HADOOP_PID_DIR}
fi
}
export PATH="${PATH:+$PATH:}/usr/sbin:/usr/bin"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
case "$1" in
start)
check_privsep_dir
check_for_no_start
log_daemon_msg "Starting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
stop)
log_daemon_msg "Stopping Apache Hadoop Task Tracker server" "hadoop-tasktracker"
if start-stop-daemon --stop --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid; then
log_end_msg 0
else
log_end_msg 1
fi
;;
restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
start-stop-daemon --stop --quiet --oknodo --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
try-restart)
check_privsep_dir
log_daemon_msg "Restarting Apache Hadoop Task Tracker server" "hadoop-tasktracker"
set +e
start-stop-daemon --stop --quiet --retry 30 --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid
RET="$?"
set -e
case $RET in
0)
# old daemon stopped
check_for_no_start log_end_msg
if start-stop-daemon --start --quiet --oknodo --pidfile ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid -c mapred -x ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh -- --config ${HADOOP_CONF_DIR} start tasktracker; then
log_end_msg 0
else
log_end_msg 1
fi
;;
1)
# daemon not running
log_progress_msg "(not running)"
log_end_msg 0
;;
*)
# failed to stop
log_progress_msg "(failed to stop)"
log_end_msg 1
;;
esac
;;
status)
status_of_proc -p ${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid ${JAVA_HOME}/bin/java hadoop-tasktracker && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/hadoop-tasktracker {start|stop|restart|try-restart|status}"
exit 1
esac
exit 0

View File

@ -1,85 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop historyserver
#
# chkconfig: 2345 90 10
# description: Hadoop historyserver
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-mapred-historyserver.pid"
desc="Hadoop historyserver daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-historyserver): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start historyserver
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-historyserver
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-historyserver): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop historyserver
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-historyserver $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-historyserver ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL
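On Red Hat-style systems this variant is registered through chkconfig, driven by the "# chkconfig: 2345 90 10" header above (a sketch):

    sudo chkconfig --add hadoop-historyserver
    chkconfig --list hadoop-historyserver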

View File

@ -1,84 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop jobtracker
#
# chkconfig: 2345 90 10
# description: Hadoop jobtracker
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-mapred-jobtracker.pid"
desc="Hadoop jobtracker daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-jobtracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start jobtracker
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-jobtracker
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-jobtracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop jobtracker
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-jobtracker $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-jobtracker ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL

View File

@ -1,84 +0,0 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starts a Hadoop tasktracker
#
# chkconfig: 2345 90 10
# description: Hadoop tasktracker
source /etc/rc.d/init.d/functions
source /etc/default/hadoop-env.sh
RETVAL=0
PIDFILE="${HADOOP_PID_DIR}/hadoop-mapred-tasktracker.pid"
desc="Hadoop tasktracker daemon"
export HADOOP_PREFIX=${HADOOP_PREFIX:-/usr}
start() {
echo -n $"Starting $desc (hadoop-tasktracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" start tasktracker
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hadoop-tasktracker
return $RETVAL
}
stop() {
echo -n $"Stopping $desc (hadoop-tasktracker): "
daemon --user mapred ${HADOOP_PREFIX}/sbin/hadoop-daemon.sh --config "${HADOOP_CONF_DIR}" stop tasktracker
RETVAL=$?
sleep 5
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hadoop-tasktracker $PIDFILE
}
restart() {
stop
start
}
checkstatus(){
status -p $PIDFILE ${JAVA_HOME}/bin/java
RETVAL=$?
}
condrestart(){
[ -e /var/lock/subsys/hadoop-tasktracker ] && restart || :
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
checkstatus
;;
restart)
restart
;;
condrestart)
condrestart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart}"
exit 1
esac
exit $RETVAL

View File

@ -1,179 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# RPM spec file for the hadoop-mapreduce package, version @version@
#
%define name hadoop-mapreduce
%define version @version@
%define release @package.release@
%define major_version %(echo %{version} | cut -d. -f -2)
# Installation Locations
%define _prefix @package.prefix@
%define _bin_dir %{_prefix}/bin
%define _conf_dir @package.conf.dir@
%define _include_dir %{_prefix}/include
%define _lib_dir %{_prefix}/lib
%define _lib64_dir %{_prefix}/lib64
%define _libexec_dir %{_prefix}/libexec
%define _log_dir @package.log.dir@
%define _pid_dir @package.pid.dir@
%define _sbin_dir %{_prefix}/sbin
%define _share_dir %{_prefix}/share
%define _var_dir @package.var.dir@
# Build time settings
%define _build_dir @package.build.dir@
%define _final_name @final.name@
%define debug_package %{nil}
# Disable brp-java-repack-jars for aspect J
%define __os_install_post \
/usr/lib/rpm/redhat/brp-compress \
%{!?__debug_package:/usr/lib/rpm/redhat/brp-strip %{__strip}} \
/usr/lib/rpm/redhat/brp-strip-static-archive %{__strip} \
/usr/lib/rpm/redhat/brp-strip-comment-note %{__strip} %{__objdump} \
/usr/lib/rpm/brp-python-bytecompile %{nil}
# RPM searches perl files for dependencies, which breaks for non-packaged perl
# libraries such as thrift, so disable the internal dependency generator
%define _use_internal_dependency_generator 0
%ifarch i386
%global hadoop_arch Linux-i386-32
%endif
%ifarch amd64 x86_64
%global hadoop_arch Linux-amd64-64
%endif
%ifarch noarch
%global hadoop_arch ""
%endif
Summary: The Apache Hadoop project develops open-source software for reliable, scalable, distributed computing
License: Apache License, Version 2.0
URL: http://hadoop.apache.org/core/
Vendor: Apache Software Foundation
Group: Development/Libraries
Name: %{name}
Version: %{version}
Release: %{release}
Source0: %{_final_name}-bin.tar.gz
Prefix: %{_prefix}
Prefix: %{_conf_dir}
Prefix: %{_log_dir}
Prefix: %{_pid_dir}
Buildroot: %{_build_dir}
Requires: sh-utils, textutils, /usr/sbin/useradd, /usr/sbin/usermod, /sbin/chkconfig, /sbin/service, hadoop-common >= %{major_version}, hadoop-common <= %{major_version}.9999, hadoop-hdfs >= %{major_version}, hadoop-hdfs <= %{major_version}.9999
AutoReqProv: no
Provides: hadoop-mapreduce
%description
The Apache Hadoop project develops open-source software for reliable, scalable,
distributed computing. Hadoop includes these subprojects:
MapReduce: A software framework for distributed processing of large data sets on compute clusters.
%prep
%setup -n %{_final_name}
%build
if [ -d ${RPM_BUILD_DIR}%{_prefix} ]; then
rm -rf ${RPM_BUILD_DIR}%{_prefix}
fi
if [ -d ${RPM_BUILD_DIR}%{_log_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_log_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_conf_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_conf_dir}
fi
if [ -d ${RPM_BUILD_DIR}%{_pid_dir} ]; then
rm -rf ${RPM_BUILD_DIR}%{_pid_dir}
fi
mkdir -p ${RPM_BUILD_DIR}%{_prefix}
mkdir -p ${RPM_BUILD_DIR}%{_bin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_include_dir}
mkdir -p ${RPM_BUILD_DIR}%{_lib_dir}
%ifarch amd64 x86_64
mkdir -p ${RPM_BUILD_DIR}%{_lib64_dir}
%endif
mkdir -p ${RPM_BUILD_DIR}%{_libexec_dir}
mkdir -p ${RPM_BUILD_DIR}%{_log_dir}
mkdir -p ${RPM_BUILD_DIR}%{_conf_dir}
mkdir -p ${RPM_BUILD_DIR}%{_pid_dir}
mkdir -p ${RPM_BUILD_DIR}%{_sbin_dir}
mkdir -p ${RPM_BUILD_DIR}%{_share_dir}
mkdir -p ${RPM_BUILD_DIR}%{_var_dir}
mkdir -p ${RPM_BUILD_DIR}/etc/init.d
cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-jobtracker.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-jobtracker
cp ${RPM_BUILD_DIR}/%{_final_name}/sbin/hadoop-tasktracker.redhat ${RPM_BUILD_DIR}/etc/init.d/hadoop-tasktracker
chmod 0755 ${RPM_BUILD_DIR}/etc/init.d/hadoop-*
#########################
#### INSTALL SECTION ####
#########################
%install
mv ${RPM_BUILD_DIR}/%{_final_name}/bin/* ${RPM_BUILD_DIR}%{_bin_dir}
rm -f ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/configuration.xsl
rm -f ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/hadoop-metrics2.properties
mv ${RPM_BUILD_DIR}/%{_final_name}/etc/hadoop/* ${RPM_BUILD_DIR}%{_conf_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/include/* ${RPM_BUILD_DIR}%{_include_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/lib/* ${RPM_BUILD_DIR}%{_lib_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/libexec/* ${RPM_BUILD_DIR}%{_libexec_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/sbin/* ${RPM_BUILD_DIR}%{_sbin_dir}
mv ${RPM_BUILD_DIR}/%{_final_name}/share/* ${RPM_BUILD_DIR}%{_share_dir}
rm -rf ${RPM_BUILD_DIR}/%{_final_name}/etc
%pre
getent group hadoop 2>/dev/null >/dev/null || /usr/sbin/groupadd -g 123 -r hadoop
/usr/sbin/useradd --comment "Hadoop MapReduce" -u 202 --shell /bin/bash -M -r --groups hadoop --home %{_var_dir}/mapred mapred 2> /dev/null || :
%post
bash ${RPM_INSTALL_PREFIX0}/sbin/update-mapred-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3}
%preun
bash ${RPM_INSTALL_PREFIX0}/sbin/update-mapred-env.sh \
--prefix=${RPM_INSTALL_PREFIX0} \
--bin-dir=${RPM_INSTALL_PREFIX0}/bin \
--sbin-dir=${RPM_INSTALL_PREFIX0}/sbin \
--conf-dir=${RPM_INSTALL_PREFIX1} \
--log-dir=${RPM_INSTALL_PREFIX2} \
--pid-dir=${RPM_INSTALL_PREFIX3} \
--uninstall
%files
%defattr(-,root,root)
%attr(0755,root,hadoop) %{_log_dir}
%attr(0775,root,hadoop) %{_pid_dir}
%config(noreplace) %{_conf_dir}/mapred-site.xml
%config(noreplace) %{_conf_dir}/capacity-scheduler.xml
%config(noreplace) %{_conf_dir}/fair-scheduler.xml
%config(noreplace) %{_conf_dir}/mapred-queues.xml
%config(noreplace) %{_conf_dir}/taskcontroller.cfg
%{_prefix}
%attr(0775,root,root) /etc/init.d/hadoop-jobtracker
%attr(0775,root,root) /etc/init.d/hadoop-tasktracker
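The %config(noreplace) entries above keep locally modified configuration across package upgrades: rpm leaves the edited file in place and writes the packaged version beside it as .rpmnew. The marked files can be listed from the installed package (a sketch):

    rpm -qc hadoop-mapreduce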

View File

@ -1,31 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>${HADOOP_JT_HOST}</value>
</property>
<property>
<name>mapred.system.dir</name>
<value>/user/mapred/system</value>
</property>
<property>
<name>mapred.local.dir</name>
<value>${HADOOP_MAPRED_DIR}</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp</value>
</property>
<property>
<name>mapred.jobtracker.taskScheduler</name>
<value>${HADOOP_TASK_SCHEDULER}</value>
</property>
</configuration>

View File

@ -1,139 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script configures mapred-env.sh and symlinks directories to support
# relocated RPM installs.
usage() {
echo "
usage: $0 <parameters>
Required parameters:
--prefix=PREFIX path to install into
Optional parameters:
--arch=i386 OS Architecture
--bin-dir=PREFIX/bin Executable directory
--conf-dir=/etc/hadoop Configuration directory
--log-dir=/var/log/hadoop Log directory
--pid-dir=/var/run PID file location
--sbin-dir=PREFIX/sbin System executable directory
"
exit 1
}
template_generator() {
cat $1 |
while read line ; do
while [[ "$line" =~ (\$\{[a-zA-Z_][a-zA-Z_0-9]*\}) ]] ; do
LHS=${BASH_REMATCH[1]}
RHS="$(eval echo "\"$LHS\"")"
line=${line//$LHS/$RHS}
done
echo $line >> $2
done
}
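# Illustration (not part of the original script): given a template line such as
#   <value>${HADOOP_JT_HOST}</value>
# and HADOOP_JT_HOST=jt.example.com exported in the environment (a hypothetical
# value), template_generator rewrites the line in the output file as
#   <value>jt.example.com</value>
# Hypothetical call:
#   template_generator mapred-site.xml.template /etc/hadoop/mapred-site.xml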
OPTS=$(getopt \
-n $0 \
-o '' \
-l 'arch:' \
-l 'prefix:' \
-l 'bin-dir:' \
-l 'conf-dir:' \
-l 'lib-dir:' \
-l 'log-dir:' \
-l 'pid-dir:' \
-l 'sbin-dir:' \
-l 'uninstall' \
-- "$@")
if [ $? != 0 ] ; then
usage
fi
eval set -- "${OPTS}"
while true ; do
case "$1" in
--arch)
ARCH=$2 ; shift 2
;;
--prefix)
PREFIX=$2 ; shift 2
;;
--bin-dir)
BIN_DIR=$2 ; shift 2
;;
--log-dir)
LOG_DIR=$2 ; shift 2
;;
--lib-dir)
LIB_DIR=$2 ; shift 2
;;
--conf-dir)
CONF_DIR=$2 ; shift 2
;;
--pid-dir)
PID_DIR=$2 ; shift 2
;;
--sbin-dir)
SBIN_DIR=$2 ; shift 2
;;
--uninstall)
UNINSTALL=1; shift
;;
--)
shift ; break
;;
*)
echo "Unknown option: $1"
usage
exit 1
;;
esac
done
for var in PREFIX; do
if [ -z "$(eval "echo \$$var")" ]; then
echo Missing param: $var
usage
fi
done
ARCH=${ARCH:-i386}
BIN_DIR=${BIN_DIR:-$PREFIX/bin}
CONF_DIR=${CONF_DIR:-$PREFIX/etc/hadoop}
LIB_DIR=${LIB_DIR:-$PREFIX/lib}
LOG_DIR=${LOG_DIR:-$PREFIX/var/log}
PID_DIR=${PID_DIR:-$PREFIX/var/run}
SBIN_DIR=${SBIN_DIR:-$PREFIX/sbin}
UNINSTALL=${UNINSTALL:-0}
if [ "${ARCH}" != "i386" ]; then
LIB_DIR=${LIB_DIR}64
fi
if [ "${UNINSTALL}" -ne "1" ]; then
mkdir -p ${LOG_DIR}
chown mapred:hadoop ${LOG_DIR}
chmod 755 ${LOG_DIR}
if [ ! -d ${PID_DIR} ]; then
mkdir -p ${PID_DIR}
chown root:hadoop ${PID_DIR}
chmod 775 ${PID_DIR}
fi
fi