diff --git a/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj b/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj deleted file mode 100644 index 76f51294098..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj +++ /dev/null @@ -1,400 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system; - -import java.io.File; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.HashMap; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.Properties; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.conf.Configuration; - -/** - * Default DaemonProtocolAspect which is used to provide default implementation - * for all the common daemon methods. If a daemon requires more specialized - * version of method, it is responsibility of the DaemonClient to introduce the - * same in woven classes. - * - */ -public aspect DaemonProtocolAspect { - - private boolean DaemonProtocol.ready; - - @SuppressWarnings("unchecked") - private HashMap> DaemonProtocol.actions = - new HashMap>(); - private static final Log LOG = LogFactory.getLog( - DaemonProtocolAspect.class.getName()); - - private static FsPermission defaultPermission = new FsPermission( - FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE); - - /** - * Set if the daemon process is ready or not, concrete daemon protocol should - * implement pointcuts to determine when the daemon is ready and use the - * setter to set the ready state. - * - * @param ready - * true if the Daemon is ready. - */ - public void DaemonProtocol.setReady(boolean ready) { - this.ready = ready; - } - - /** - * Checks if the daemon process is alive or not. - * - * @throws IOException - * if daemon is not alive. - */ - public void DaemonProtocol.ping() throws IOException { - } - - /** - * Checks if the daemon process is ready to accepting RPC connections after it - * finishes initialization.
- * - * @return true if ready to accept connection. - * - * @throws IOException - */ - public boolean DaemonProtocol.isReady() throws IOException { - return ready; - } - - /** - * Returns the process related information regarding the daemon process.
- * - * @return process information. - * @throws IOException - */ - public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException { - int activeThreadCount = Thread.activeCount(); - long currentTime = System.currentTimeMillis(); - long maxmem = Runtime.getRuntime().maxMemory(); - long freemem = Runtime.getRuntime().freeMemory(); - long totalmem = Runtime.getRuntime().totalMemory(); - Map envMap = System.getenv(); - Properties sysProps = System.getProperties(); - Map props = new HashMap(); - for (Map.Entry entry : sysProps.entrySet()) { - props.put((String) entry.getKey(), (String) entry.getValue()); - } - ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime, - freemem, maxmem, totalmem, envMap, props); - return info; - } - - public void DaemonProtocol.enable(List> faults) throws IOException { - } - - public void DaemonProtocol.disableAll() throws IOException { - } - - public abstract Configuration DaemonProtocol.getDaemonConf() - throws IOException; - - public FileStatus DaemonProtocol.getFileStatus(String path, boolean local) - throws IOException { - Path p = new Path(path); - FileSystem fs = getFS(p, local); - p.makeQualified(fs); - FileStatus fileStatus = fs.getFileStatus(p); - return cloneFileStatus(fileStatus); - } - - /** - * Create a file with given permissions in a file system. - * @param path - source path where the file has to create. - * @param fileName - file name. - * @param permission - file permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void DaemonProtocol.createFile(String path, String fileName, - FsPermission permission, boolean local) throws IOException { - Path p = new Path(path); - FileSystem fs = getFS(p, local); - Path filePath = new Path(path, fileName); - fs.create(filePath); - if (permission == null) { - fs.setPermission(filePath, defaultPermission); - } else { - fs.setPermission(filePath, permission); - } - fs.close(); - } - - /** - * Create a folder with given permissions in a file system. - * @param path - source path where the file has to be creating. - * @param folderName - folder name. - * @param permission - folder permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void DaemonProtocol.createFolder(String path, String folderName, - FsPermission permission, boolean local) throws IOException { - Path p = new Path(path); - FileSystem fs = getFS(p, local); - Path folderPath = new Path(path, folderName); - fs.mkdirs(folderPath); - if (permission == null) { - fs.setPermission(folderPath, defaultPermission); - } else { - fs.setPermission(folderPath, permission); - } - fs.close(); - } - - public FileStatus[] DaemonProtocol.listStatus(String path, boolean local) - throws IOException { - Path p = new Path(path); - FileSystem fs = getFS(p, local); - FileStatus[] status = fs.listStatus(p); - if (status != null) { - FileStatus[] result = new FileStatus[status.length]; - int i = 0; - for (FileStatus fileStatus : status) { - result[i++] = cloneFileStatus(fileStatus); - } - return result; - } - return status; - } - - /** - * FileStatus object may not be serializable. Clone it into raw FileStatus - * object. 
- */ - private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) { - return new FileStatus(fileStatus.getLen(), - fileStatus.isDir(), - fileStatus.getReplication(), - fileStatus.getBlockSize(), - fileStatus.getModificationTime(), - fileStatus.getAccessTime(), - fileStatus.getPermission(), - fileStatus.getOwner(), - fileStatus.getGroup(), - fileStatus.getPath()); - } - - private FileSystem DaemonProtocol.getFS(final Path path, final boolean local) - throws IOException { - FileSystem ret = null; - try { - ret = UserGroupInformation.getLoginUser().doAs ( - new PrivilegedExceptionAction() { - public FileSystem run() throws IOException { - FileSystem fs = null; - if (local) { - fs = FileSystem.getLocal(getDaemonConf()); - } else { - fs = path.getFileSystem(getDaemonConf()); - } - return fs; - } - }); - } catch (InterruptedException ie) { - } - return ret; - } - - @SuppressWarnings("unchecked") - public ControlAction[] DaemonProtocol.getActions(Writable key) - throws IOException { - synchronized (actions) { - List actionList = actions.get(key); - if(actionList == null) { - return new ControlAction[0]; - } else { - return (ControlAction[]) actionList.toArray(new ControlAction[actionList - .size()]); - } - } - } - - - @SuppressWarnings("unchecked") - public void DaemonProtocol.sendAction(ControlAction action) - throws IOException { - synchronized (actions) { - List actionList = actions.get(action.getTarget()); - if(actionList == null) { - actionList = new ArrayList(); - actions.put(action.getTarget(), actionList); - } - actionList.add(action); - } - } - - @SuppressWarnings("unchecked") - public boolean DaemonProtocol.isActionPending(ControlAction action) - throws IOException{ - synchronized (actions) { - List actionList = actions.get(action.getTarget()); - if(actionList == null) { - return false; - } else { - return actionList.contains(action); - } - } - } - - - @SuppressWarnings("unchecked") - public void DaemonProtocol.removeAction(ControlAction action) - throws IOException { - synchronized (actions) { - List actionList = actions.get(action.getTarget()); - if(actionList == null) { - return; - } else { - actionList.remove(action); - } - } - } - - public void DaemonProtocol.clearActions() throws IOException { - synchronized (actions) { - actions.clear(); - } - } - - public String DaemonProtocol.getFilePattern() { - //We use the environment variable HADOOP_LOGFILE to get the - //pattern to use in the search. 
- String logDir = System.getProperty("hadoop.log.dir"); - String daemonLogPattern = System.getProperty("hadoop.log.file"); - if(daemonLogPattern == null && daemonLogPattern.isEmpty()) { - return "*"; - } - return logDir+File.separator+daemonLogPattern+"*"; - } - - public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern, - String[] list) throws IOException { - StringBuffer filePattern = new StringBuffer(getFilePattern()); - String[] cmd = null; - if (list != null) { - StringBuffer filterExpPattern = new StringBuffer(); - int index=0; - for (String excludeExp : list) { - if (index++ < list.length -1) { - filterExpPattern.append("grep -v " + excludeExp + " | "); - } else { - filterExpPattern.append("grep -v " + excludeExp + " | wc -l"); - } - } - cmd = new String[] { - "bash", - "-c", - "grep " - + pattern + " " + filePattern + " | " - + filterExpPattern}; - } else { - cmd = new String[] { - "bash", - "-c", - "grep -c " - + pattern + " " + filePattern - + " | awk -F: '{s+=$2} END {print s}'" }; - } - ShellCommandExecutor shexec = new ShellCommandExecutor(cmd); - shexec.execute(); - String output = shexec.getOutput(); - return Integer.parseInt(output.replaceAll("\n", "").trim()); - } - - /** - * This method is used for suspending the process. - * @param pid process id - * @throws IOException if an I/O error occurs. - * @return true if process is suspended otherwise false. - */ - public boolean DaemonProtocol.suspendProcess(String pid) throws IOException { - String suspendCmd = getDaemonConf().get("test.system.hdrc.suspend.cmd", - "kill -SIGSTOP"); - String [] command = {"bash", "-c", suspendCmd + " " + pid}; - ShellCommandExecutor shexec = new ShellCommandExecutor(command); - try { - shexec.execute(); - } catch (Shell.ExitCodeException e) { - LOG.warn("suspended process throws an exitcode " - + "exception for not being suspended the given process id."); - return false; - } - LOG.info("The suspend process command is :" - + shexec.toString() - + " and the output for the command is " - + shexec.getOutput()); - return true; - } - - /** - * This method is used for resuming the process - * @param pid process id of suspended process. - * @throws IOException if an I/O error occurs. - * @return true if suspeneded process is resumed otherwise false. 
- */ - public boolean DaemonProtocol.resumeProcess(String pid) throws IOException { - String resumeCmd = getDaemonConf().get("test.system.hdrc.resume.cmd", - "kill -SIGCONT"); - String [] command = {"bash", "-c", resumeCmd + " " + pid}; - ShellCommandExecutor shexec = new ShellCommandExecutor(command); - try { - shexec.execute(); - } catch(Shell.ExitCodeException e) { - LOG.warn("Resume process throws an exitcode " - + "exception for not being resumed the given process id."); - return false; - } - LOG.info("The resume process command is :" - + shexec.toString() - + " and the output for the command is " - + shexec.getOutput()); - return true; - } - - private String DaemonProtocol.user = null; - - public String DaemonProtocol.getDaemonUser() { - return user; - } - - public void DaemonProtocol.setUser(String user) { - this.user = user; - } -} - diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in deleted file mode 100644 index b64b820b3ec..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in +++ /dev/null @@ -1,41 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -OBJS=main.o runAs.o -CC=@CC@ -CFLAGS = @CFLAGS@ -BINARY=runAs -installdir = @prefix@ - -all: $(OBJS) - $(CC) $(CFLAG) -o $(BINARY) $(OBJS) - -main.o: runAs.o main.c - $(CC) $(CFLAG) -o main.o -c main.c - -runAs.o: runAs.h runAs.c - $(CC) $(CFLAG) -o runAs.o -c runAs.c - -clean: - rm -rf $(BINARY) $(OBJS) $(TESTOBJS) - -install: all - cp $(BINARY) $(installdir) - -uninstall: - rm -rf $(installdir)/$(BINARY) - rm -rf $(BINARY) diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure deleted file mode 100644 index acd5bfa10d1..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure +++ /dev/null @@ -1,5117 +0,0 @@ -#! /bin/sh -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.65 for runAs 0.1. -# -# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, -# Inc. -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). 
But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. 
- BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? 
-eq 1` - } -fi # as_fn_arith - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 
2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -test -n "$DJDIR" || exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='runAs' -PACKAGE_TARNAME='runas' -PACKAGE_VERSION='0.1' -PACKAGE_STRING='runAs 0.1' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -ac_default_prefix=. -ac_unique_file="main.c" -# Factoring default headers for most tests. -ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -ac_subst_vars='SET_MAKE -LTLIBOBJS -LIBOBJS -EGREP -GREP -CPP -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -with_home -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -CPP' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. -# (The list follows the same order as the GNU Coding Standards.) 
-bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. - - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. 
- expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. - with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - 
site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information." - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. 
- $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error "missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. - If a cross compiler is detected then cross compile mode will be used." >&2 - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. 
-fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. -case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures runAs 0.1 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/runas] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of runAs 0.1:";; - esac - cat <<\_ACEOF - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) ---with-home path to hadoop home dir - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - CPP C preprocessor - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. 
- if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -runAs configure 0.1 -generated by GNU Autoconf 2.65 - -Copyright (C) 2009 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. 
-ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" 
>&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_compile - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_type - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. 
*/ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_func -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by runAs $as_me 0.1, which was -generated by GNU Autoconf 2.65. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. ## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. 
- else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - cat <<\_ASBOX -## ---------------- ## -## Cache variables. ## -## ---------------- ## -_ASBOX - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - cat <<\_ASBOX -## ----------------- ## -## Output variables. ## -## ----------------- ## -_ASBOX - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - cat <<\_ASBOX -## ------------------- ## -## File substitutions. ## -## ------------------- ## -_ASBOX - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - cat <<\_ASBOX -## ----------- ## -## confdefs.h. ## -## ----------- ## -_ASBOX - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. 
- -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - ac_site_file1=$CONFIG_SITE -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. 
- ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. - if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -#changing default prefix value to empty string, so that binary does not -#gets installed within system - - -#add new arguments --with-home - -# Check whether --with-home was given. -if test "${with_home+set}" = set; then : - withval=$with_home; -fi - - -ac_config_headers="$ac_config_headers runAs.h" - - -# Checks for programs. -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. - shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "no acceptable C compiler found in \$PATH -See \`config.log' for more details." "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. 
We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "C compiler cannot create executables -See \`config.log' for more details." "$LINENO" 5; }; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details." "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details." "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if test "${ac_cv_objext+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of object files: cannot compile -See \`config.log' for more details." "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... 
" >&6; } -if test "${ac_cv_c_compiler_gnu+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if test "${ac_cv_prog_cc_g+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if test "${ac_cv_prog_cc_c89+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 
1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# Checks for libraries. - -# Checks for header files. - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if test "${ac_cv_prog_CPP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. 
-rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details." "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if test "${ac_cv_path_GREP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue -# Check for GNU ac_path_GREP and select it if it is found. 
- # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if test "${ac_cv_path_EGREP+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue -# Check for GNU ac_path_EGREP and select it if it is found. 
- # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if test "${ac_cv_header_stdc+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? 
((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_header in stdlib.h string.h unistd.h fcntl.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -#check for HADOOP_PREFIX -if test "$with_home" != "" -then -cat >>confdefs.h <<_ACEOF -#define HADOOP_PREFIX "$with_home" -_ACEOF - -fi - -# Checks for typedefs, structures, and compiler characteristics. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 -$as_echo_n "checking for an ANSI C-conforming const... " >&6; } -if test "${ac_cv_c_const+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -/* FIXME: Include the comments suggested by Paul. */ -#ifndef __cplusplus - /* Ultrix mips cc rejects this. */ - typedef int charset[2]; - const charset cs; - /* SunOS 4.1.1 cc rejects this. */ - char const *const *pcpcc; - char **ppc; - /* NEC SVR4.0.2 mips cc rejects this. */ - struct point {int x, y;}; - static struct point const zero = {0,0}; - /* AIX XL C 1.02.0.0 rejects this. - It does not let you subtract one const X* pointer from another in - an arm of an if-expression whose if-part is not a constant - expression */ - const char *g = "string"; - pcpcc = &g + (g ? g-g : 0); - /* HPUX 7.0 cc rejects these. */ - ++pcpcc; - ppc = (char**) pcpcc; - pcpcc = (char const *const *) ppc; - { /* SCO 3.2v4 cc rejects this. */ - char *t; - char const *s = 0 ? (char *) 0 : (char const *) 0; - - *t++ = 0; - if (s) return 0; - } - { /* Someone thinks the Sun supposedly-ANSI compiler will reject this. */ - int x[] = {25, 17}; - const int *foo = &x[0]; - ++foo; - } - { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */ - typedef const int *iptr; - iptr p = 0; - ++p; - } - { /* AIX XL C 1.02.0.0 rejects this saying - "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. 
*/ - struct s { int j; const int *ap[3]; }; - struct s *b; b->j = 5; - } - { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */ - const int foo = 10; - if (!foo) return 0; - } - return !cs[0] && !zero.x; -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_const=yes -else - ac_cv_c_const=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 -$as_echo "$ac_cv_c_const" >&6; } -if test $ac_cv_c_const = no; then - -$as_echo "#define const /**/" >>confdefs.h - -fi - -ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default" -if test "x$ac_cv_type_pid_t" = x""yes; then : - -else - -cat >>confdefs.h <<_ACEOF -#define pid_t int -_ACEOF - -fi - -ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default" -if test "x$ac_cv_type_mode_t" = x""yes; then : - -else - -cat >>confdefs.h <<_ACEOF -#define mode_t int -_ACEOF - -fi - -ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" -if test "x$ac_cv_type_size_t" = x""yes; then : - -else - -cat >>confdefs.h <<_ACEOF -#define size_t unsigned int -_ACEOF - -fi - - -# Checks for library functions. -for ac_header in stdlib.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" -if test "x$ac_cv_header_stdlib_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_STDLIB_H 1 -_ACEOF - -fi - -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5 -$as_echo_n "checking for GNU libc compatible malloc... " >&6; } -if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - ac_cv_func_malloc_0_nonnull=no -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#if defined STDC_HEADERS || defined HAVE_STDLIB_H -# include -#else -char *malloc (); -#endif - -int -main () -{ -return ! malloc (0); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_func_malloc_0_nonnull=yes -else - ac_cv_func_malloc_0_nonnull=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5 -$as_echo "$ac_cv_func_malloc_0_nonnull" >&6; } -if test $ac_cv_func_malloc_0_nonnull = yes; then : - -$as_echo "#define HAVE_MALLOC 1" >>confdefs.h - -else - $as_echo "#define HAVE_MALLOC 0" >>confdefs.h - - case " $LIBOBJS " in - *" malloc.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS malloc.$ac_objext" - ;; -esac - - -$as_echo "#define malloc rpl_malloc" >>confdefs.h - -fi - - -for ac_header in stdlib.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default" -if test "x$ac_cv_header_stdlib_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_STDLIB_H 1 -_ACEOF - -fi - -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5 -$as_echo_n "checking for GNU libc compatible realloc... " >&6; } -if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - ac_cv_func_realloc_0_nonnull=no -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#if defined STDC_HEADERS || defined HAVE_STDLIB_H -# include -#else -char *realloc (); -#endif - -int -main () -{ -return ! realloc (0, 0); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_func_realloc_0_nonnull=yes -else - ac_cv_func_realloc_0_nonnull=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5 -$as_echo "$ac_cv_func_realloc_0_nonnull" >&6; } -if test $ac_cv_func_realloc_0_nonnull = yes; then : - -$as_echo "#define HAVE_REALLOC 1" >>confdefs.h - -else - $as_echo "#define HAVE_REALLOC 0" >>confdefs.h - - case " $LIBOBJS " in - *" realloc.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS realloc.$ac_objext" - ;; -esac - - -$as_echo "#define realloc rpl_realloc" >>confdefs.h - -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5 -$as_echo_n "checking for uid_t in sys/types.h... " >&6; } -if test "${ac_cv_type_uid_t+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uid_t" >/dev/null 2>&1; then : - ac_cv_type_uid_t=yes -else - ac_cv_type_uid_t=no -fi -rm -f conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5 -$as_echo "$ac_cv_type_uid_t" >&6; } -if test $ac_cv_type_uid_t = no; then - -$as_echo "#define uid_t int" >>confdefs.h - - -$as_echo "#define gid_t int" >>confdefs.h - -fi - -for ac_header in unistd.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default" -if test "x$ac_cv_header_unistd_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_UNISTD_H 1 -_ACEOF - -fi - -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5 -$as_echo_n "checking for working chown... " >&6; } -if test "${ac_cv_func_chown_works+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - if test "$cross_compiling" = yes; then : - ac_cv_func_chown_works=no -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default -#include - -int -main () -{ - char *f = "conftest.chown"; - struct stat before, after; - - if (creat (f, 0600) < 0) - return 1; - if (stat (f, &before) < 0) - return 1; - if (chown (f, (uid_t) -1, (gid_t) -1) == -1) - return 1; - if (stat (f, &after) < 0) - return 1; - return ! 
(before.st_uid == after.st_uid && before.st_gid == after.st_gid); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_func_chown_works=yes -else - ac_cv_func_chown_works=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -rm -f conftest.chown - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5 -$as_echo "$ac_cv_func_chown_works" >&6; } -if test $ac_cv_func_chown_works = yes; then - -$as_echo "#define HAVE_CHOWN 1" >>confdefs.h - -fi - -for ac_func in strerror memset mkdir rmdir strdup -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -eval as_val=\$$as_ac_var - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - -ac_config_files="$ac_config_files Makefile" - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. 
- sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - test "x$cache_file" != "x/dev/null" && - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - cat confcache >$cache_file - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: ${CONFIG_STATUS=./config.status} -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
-as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' -else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' -fi -as_executable_p=$as_test_x - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by runAs $as_me 0.1, which was -generated by GNU Autoconf 2.65. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -runAs config.status 0.1 -configured by $0, generated by GNU Autoconf 2.65, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2009 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. 
## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "runAs.h") CONFIG_HEADERS="$CONFIG_HEADERS runAs.h" ;; - "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; - - *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= - trap 'exit_status=$? - { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -n "$tmp" && test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5 - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. -if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ - || as_fn_error "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove $(srcdir), -# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=/{ -s/:*\$(srcdir):*/:/ -s/:*\${srcdir}:*/:/ -s/:*@srcdir@:*/:/ -s/^\([^=]*=[ ]*\):*/\1/ -s/:*$// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. -ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_t=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_t"; then - break - elif $ac_last_try; then - as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. 
- -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS " -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' - if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. 
- case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$tmp/stdin" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined." >&2;} - - rm -f "$tmp/stdin" - case $ac_file in - -) cat "$tmp/out" && rm -f "$tmp/out";; - *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; - esac \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" - } >"$tmp/config.h" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$tmp/config.h" "$ac_file" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error "could not create -" "$LINENO" 5 - fi - ;; - - - esac - -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit $? 
-fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5 -$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; } -if test "${ac_cv_header_stdbool_h+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -#include -#ifndef bool - "error: bool is not defined" -#endif -#ifndef false - "error: false is not defined" -#endif -#if false - "error: false is not 0" -#endif -#ifndef true - "error: true is not defined" -#endif -#if true != 1 - "error: true is not 1" -#endif -#ifndef __bool_true_false_are_defined - "error: __bool_true_false_are_defined is not defined" -#endif - - struct s { _Bool s: 1; _Bool t; } s; - - char a[true == 1 ? 1 : -1]; - char b[false == 0 ? 1 : -1]; - char c[__bool_true_false_are_defined == 1 ? 1 : -1]; - char d[(bool) 0.5 == true ? 1 : -1]; - bool e = &s; - char f[(_Bool) 0.0 == false ? 1 : -1]; - char g[true]; - char h[sizeof (_Bool)]; - char i[sizeof s.t]; - enum { j = false, k = true, l = false * true, m = true * 256 }; - /* The following fails for - HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */ - _Bool n[m]; - char o[sizeof n == m * sizeof n[0] ? 1 : -1]; - char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1]; -# if defined __xlc__ || defined __GNUC__ - /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0 - reported by James Lemley on 2005-10-05; see - http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html - This test is not quite right, since xlc is allowed to - reject this program, as the initializer for xlcbug is - not one of the forms that C requires support for. - However, doing the test right would require a runtime - test, and that would make cross-compilation harder. - Let us hope that IBM fixes the xlc bug, and also adds - support for this kind of constant expression. In the - meantime, this test will reject xlc, which is OK, since - our stdbool.h substitute should suffice. We also test - this with GCC, where it should work, to detect more - quickly whether someone messes up the test in the - future. */ - char digs[] = "0123456789"; - int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1); -# endif - /* Catch a bug in an HP-UX C compiler. See - http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html - http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html - */ - _Bool q = true; - _Bool *pq = &q; - -int -main () -{ - - *pq |= q; - *pq |= ! q; - /* Refer to every declared value, to avoid compiler optimizations. 
*/ - return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l - + !m + !n + !o + !p + !q + !pq); - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdbool_h=yes -else - ac_cv_header_stdbool_h=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5 -$as_echo "$ac_cv_header_stdbool_h" >&6; } -ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default" -if test "x$ac_cv_type__Bool" = x""yes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE__BOOL 1 -_ACEOF - - -fi - -if test $ac_cv_header_stdbool_h = yes; then - -$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 -$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } -set x ${MAKE-make} -ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` -if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat >conftest.make <<\_ACEOF -SHELL = /bin/sh -all: - @echo '@@@%%%=$(MAKE)=@@@%%%' -_ACEOF -# GNU make sometimes prints "make[1]: Entering...", which would confuse us. -case `${MAKE-make} -f conftest.make 2>/dev/null` in - *@@@%%%=?*=@@@%%%*) - eval ac_cv_prog_make_${ac_make}_set=yes;; - *) - eval ac_cv_prog_make_${ac_make}_set=no;; -esac -rm -f conftest.make -fi -if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - SET_MAKE= -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - SET_MAKE="MAKE=${MAKE-make}" -fi - diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac deleted file mode 100644 index ffaa4584762..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac +++ /dev/null @@ -1,65 +0,0 @@ -# -*- Autoconf -*- -# Process this file with autoconf to produce a configure script. - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -AC_PREREQ(2.59) -AC_INIT([runAs],[0.1]) - -#changing default prefix value to empty string, so that binary does not -#gets installed within system -AC_PREFIX_DEFAULT(.) - -#add new arguments --with-home -AC_ARG_WITH(home,[--with-home path to hadoop home dir]) -AC_CONFIG_SRCDIR([main.c]) -AC_CONFIG_HEADER([runAs.h]) - -# Checks for programs. -AC_PROG_CC - -# Checks for libraries. - -# Checks for header files. 
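For orientation, a minimal sketch of the Autotools round trip this configure.ac drives, assuming a stock workflow; the install path below is a made-up placeholder, not a value from this tree. autoconf regenerates the generated configure script deleted above, autoheader refreshes runAs.h.in, and --with-home ends up as the HADOOP_PREFIX define in the generated runAs.h:

    autoconf && autoheader                # regenerate configure and runAs.h.in
    ./configure --with-home=/opt/hadoop   # writes #define HADOOP_PREFIX "/opt/hadoop" into runAs.h
    make                                  # builds the runAs helper from the generated Makefile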
-AC_HEADER_STDC -AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h]) - -#check for HADOOP_PREFIX -if test "$with_home" != "" -then -AC_DEFINE_UNQUOTED(HADOOP_PREFIX,"$with_home") -fi - -# Checks for typedefs, structures, and compiler characteristics. -AC_C_CONST -AC_TYPE_PID_T -AC_TYPE_MODE_T -AC_TYPE_SIZE_T - -# Checks for library functions. -AC_FUNC_MALLOC -AC_FUNC_REALLOC -AC_FUNC_CHOWN -AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup]) - -AC_CONFIG_FILES([Makefile]) -AC_OUTPUT - -AC_HEADER_STDBOOL -AC_PROG_MAKE_SET diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c deleted file mode 100644 index e31635f9235..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runAs.h" - -/** - * The binary would be accepting the command of following format: - * cluster-controller user hostname hadoop-daemon.sh-command - */ -int main(int argc, char **argv) { - int errorcode; - char *user; - char *hostname; - char *command; - struct passwd user_detail; - int i = 1; - /* - * Minimum number of arguments required for the binary to perform. - */ - if (argc < 4) { - fprintf(stderr, "Invalid number of arguments passed to the binary\n"); - return INVALID_ARGUMENT_NUMER; - } - - user = argv[1]; - if (user == NULL) { - fprintf(stderr, "Invalid user name\n"); - return INVALID_USER_NAME; - } - - if (getuserdetail(user, &user_detail) != 0) { - fprintf(stderr, "Invalid user name\n"); - return INVALID_USER_NAME; - } - - if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) { - fprintf(stderr, "Cannot run tasks as super user\n"); - return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS; - } - - hostname = argv[2]; - command = argv[3]; - return process_controller_command(user, hostname, command); -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c deleted file mode 100644 index 9b7803cbfbf..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "runAs.h" - -/* - * Function to get the user details populated given a user name. - */ -int getuserdetail(char *user, struct passwd *user_detail) { - struct passwd *tempPwdPtr; - int size = sysconf(_SC_GETPW_R_SIZE_MAX); - char pwdbuffer[size]; - if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) { - fprintf(stderr, "Invalid user provided to getpwnam\n"); - return -1; - } - return 0; -} - -/** - * Function to switch the user identity and set the appropriate - * group membership for the user specified in the argument. - */ -int switchuser(char *user) { - //populate the user details - struct passwd user_detail; - if ((getuserdetail(user, &user_detail)) != 0) { - return INVALID_USER_NAME; - } - //set the right supplementary groups for the user. - if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) { - fprintf(stderr, "Init groups call for the user : %s failed\n", - user_detail.pw_name); - return INITGROUPS_FAILED; - } - errno = 0; - //switch the group. - setgid(user_detail.pw_gid); - if (errno != 0) { - fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name); - return SETUID_OPER_FAILED; - } - errno = 0; - //switch the user - setuid(user_detail.pw_uid); - if (errno != 0) { - fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name); - return SETUID_OPER_FAILED; - } - errno = 0; - //set the effective user id. - seteuid(user_detail.pw_uid); - if (errno != 0) { - fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name); - return SETUID_OPER_FAILED; - } - return 0; -} - -/* - * Top level method which processes a cluster management - * command. - */ -int process_cluster_command(char * user, char * node , char *command) { - char *finalcommandstr; - int len; - int errorcode = 0; - if (strncmp(command, "", strlen(command)) == 0) { - fprintf(stderr, "Invalid command passed\n"); - return INVALID_COMMAND_PASSED; - } - len = STRLEN + strlen(command); - finalcommandstr = (char *) malloc((len + 1) * sizeof(char)); - snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_PREFIX, - command); - finalcommandstr[len] = '\0'; - errorcode = switchuser(user); - if (errorcode != 0) { - fprintf(stderr, "switch user failed\n"); - return errorcode; - } - errno = 0; - execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL); - if (errno != 0) { - fprintf(stderr, "execlp failed due to : %s\n", strerror(errno)); - } - return 0; -} - -/* - * Processes a cluster controller command; this is the API exposed to - * main in order to execute the cluster commands. 
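For context, a minimal sketch of how this helper is driven, assuming the binary is built under the AC_INIT name runAs; the user, host, prefix and daemon arguments below are made-up placeholders. main() validates the arguments, process_cluster_command() formats SCRIPT_DIR_PATTERN with HADOOP_PREFIX and the command string, switches to the target user, and then replaces itself with ssh:

    # runAs <user> <hostname> "<arguments for hadoop-daemon.sh>"
    ./runAs hadooptest slave1.example.com "stop datanode"
    # ...which, after the uid/gid switch, effectively execs:
    ssh slave1.example.com "/opt/hadoop/bin/hadoop-daemon.sh stop datanode"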
- */ -int process_controller_command(char *user, char * node, char *command) { - return process_cluster_command(user, node, command); -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in b/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in deleted file mode 100644 index 4cdab71e6c3..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* -* List of possible error codes. -*/ -enum errorcodes { - INVALID_ARGUMENT_NUMER = 1, - INVALID_USER_NAME, //2 - SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3 - INITGROUPS_FAILED, //4 - SETUID_OPER_FAILED, //5 - INVALID_COMMAND_PASSED, //6 -}; - -#undef HADOOP_PREFIX - -#define SSH_COMMAND "ssh" - -#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituded - -#define STRLEN strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_PREFIX) - -/* - * Function to get the user details populated given a user name. - */ -int getuserdetails(char *user, struct passwd *user_detail); - - /* - * Process cluster controller command the API exposed to the - * main in order to execute the cluster commands. - */ -int process_controller_command(char *user, char *node, char *command); diff --git a/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml b/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml deleted file mode 100644 index b2c3735e285..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - - - - - security.daemon.protocol.acl - * - ACL for DaemonProtocol, extended by all other - Herriot RPC protocols. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.nn.protocol.acl - * - ACL for NNProtocol, used by the - Herriot AbstractDaemonCluster's implementations to connect to a remote - NameNode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.dn.protocol.acl - * - ACL for DNProtocol, used by the - Herriot AbstractDaemonCluster's implementations to connect to a remote - DataNode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". 
- A special value of "*" means all users are allowed. - - - - security.tt.protocol.acl - * - ACL for TTProtocol, used by the - Herriot AbstractDaemonCluster's implementations to connect to a remote - TaskTracker. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java deleted file mode 100644 index 7254aa782fa..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java +++ /dev/null @@ -1,599 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system; - -import java.io.IOException; -import java.util.*; - -import org.junit.Assert; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.test.system.process.RemoteProcess; - -import javax.management.*; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; - -/** - * Abstract class which encapsulates the DaemonClient which is used in the - * system tests.
- * - * @param PROXY the proxy implementation of a specific Daemon - */ -public abstract class AbstractDaemonClient { - private Configuration conf; - private Boolean jmxEnabled = null; - private MBeanServerConnection connection; - private int jmxPortNumber = -1; - private RemoteProcess process; - private boolean connected; - - private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class); - private static final String HADOOP_JMX_DOMAIN = "Hadoop"; - private static final String HADOOP_OPTS_ENV = "HADOOP_OPTS"; - - /** - * Create a Daemon client.
- * - * @param conf configuration used by the proxy to connect to the Daemon. - * @param process the RemoteProcess handle used to manage the particular daemon. - * - * @throws IOException on RPC error - */ - public AbstractDaemonClient(Configuration conf, RemoteProcess process) - throws IOException { - this.conf = conf; - this.process = process; - } - - /** - * Gets if the client is connected to the Daemon
- * - * @return true if connected. - */ - public boolean isConnected() { - return connected; - } - - protected void setConnected(boolean connected) { - this.connected = connected; - } - - /** - * Create an RPC proxy to the daemon
- * - * @throws IOException on RPC error - */ - public abstract void connect() throws IOException; - - /** - * Disconnect the underlying RPC proxy to the daemon.
- * @throws IOException in case of communication errors - */ - public abstract void disconnect() throws IOException; - - /** - * Get the proxy to connect to a particular service Daemon.
- * - * @return proxy to connect to a particular service Daemon. - */ - protected abstract PROXY getProxy(); - - /** - * Gets the daemon level configuration.
- * - * @return configuration using which daemon is running - */ - public Configuration getConf() { - return conf; - } - - /** - * Gets the host on which Daemon is currently running.
- * - * @return hostname - */ - public String getHostName() { - return process.getHostName(); - } - - /** - * Gets if the Daemon is ready to accept RPC connections.
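For illustration only (not part of the deleted sources): a test utility built on this client API might poll the readiness flag after connecting, roughly as sketched below. The generic bound on AbstractDaemonClient is assumed from the @param PROXY tag above, and the helper name and timeout handling are the editor's own.

    // Hypothetical helper showing the connect()/isReady() contract; not from the patch.
    static void waitUntilReady(AbstractDaemonClient<? extends DaemonProtocol> client,
        long timeoutMs) throws IOException, InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      client.connect();                       // build the RPC proxy to the daemon
      while (!client.isReady()) {             // readiness is reported by the daemon over RPC
        if (System.currentTimeMillis() > deadline) {
          throw new IOException("Daemon on " + client.getHostName()
              + " did not become ready within " + timeoutMs + " ms");
        }
        Thread.sleep(1000);
      }
    }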
- * - * @return true if daemon is ready. - * @throws IOException on RPC error - */ - public boolean isReady() throws IOException { - return getProxy().isReady(); - } - - /** - * Kills the Daemon process
- * @throws IOException on RPC error - */ - public void kill() throws IOException { - process.kill(); - } - - /** - * Checks if the Daemon process is alive or not
- * @throws IOException on RPC error - */ - public void ping() throws IOException { - getProxy().ping(); - } - - /** - * Start up the Daemon process.
- * @throws IOException on RPC error - */ - public void start() throws IOException { - process.start(); - } - - /** - * Get system level view of the Daemon process. - * - * @return returns system level view of the Daemon process. - * - * @throws IOException on RPC error. - */ - public ProcessInfo getProcessInfo() throws IOException { - return getProxy().getProcessInfo(); - } - - /** - * Abstract method to retrieve the name of a daemon specific env. var - * @return name of Hadoop environment variable containing a daemon options - */ - abstract public String getHadoopOptsEnvName (); - - /** - * Checks remote daemon process info to see if certain JMX sys. properties - * are available and reckon if the JMX service is enabled on the remote side - * - * @return boolean code indicating availability of remote JMX - * @throws IOException is throws in case of communication errors - */ - public boolean isJmxEnabled() throws IOException { - return isJmxEnabled(HADOOP_OPTS_ENV) || - isJmxEnabled(getHadoopOptsEnvName()); - } - - /** - * Checks remote daemon process info to see if certain JMX sys. properties - * are available and reckon if the JMX service is enabled on the remote side - * - * @param envivar name of an evironment variable to be searched - * @return boolean code indicating availability of remote JMX - * @throws IOException is throws in case of communication errors - */ - protected boolean isJmxEnabled(String envivar) throws IOException { - if (jmxEnabled != null) return jmxEnabled; - boolean ret = false; - String jmxRemoteString = "-Dcom.sun.management.jmxremote"; - String hadoopOpts = getProcessInfo().getEnv().get(envivar); - LOG.debug("Looking into " + hadoopOpts + " from " + envivar); - List options = Arrays.asList(hadoopOpts.split(" ")); - ret = options.contains(jmxRemoteString); - jmxEnabled = ret; - return ret; - } - - /** - * Checks remote daemon process info to find remote JMX server port number - * By default this method will look into "HADOOP_OPTS" variable only. - * @return number of remote JMX server or -1 if it can't be found - * @throws IOException is throws in case of communication errors - * @throws IllegalArgumentException if non-integer port is set - * in the remote process info - */ - public int getJmxPortNumber() throws IOException, IllegalArgumentException { - int portNo = getJmxPortNumber(HADOOP_OPTS_ENV); - return portNo != -1 ? portNo : getJmxPortNumber(getHadoopOptsEnvName()); - } - - /** - * Checks remote daemon process info to find remote JMX server port number - * - * @param envivar name of the env. var. 
to look for JMX specific settings - * @return number of remote JMX server or -1 if it can't be found - * @throws IOException is throws in case of communication errors - * @throws IllegalArgumentException if non-integer port is set - * in the remote process info - */ - protected int getJmxPortNumber(final String envivar) throws - IOException, IllegalArgumentException { - if (jmxPortNumber != -1) return jmxPortNumber; - String jmxPortString = "-Dcom.sun.management.jmxremote.port"; - - String hadoopOpts = getProcessInfo().getEnv().get(envivar); - int portNumber = -1; - boolean found = false; - String[] options = hadoopOpts.split(" "); - for (String option : options) { - if (option.startsWith(jmxPortString)) { - found = true; - try { - portNumber = Integer.parseInt(option.split("=")[1]); - } catch (NumberFormatException e) { - throw new IllegalArgumentException("JMX port number isn't integer"); - } - break; - } - } - if (!found) - throw new IllegalArgumentException("Can't detect JMX port number"); - jmxPortNumber = portNumber; - return jmxPortNumber; - } - - /** - * Return a file status object that represents the path. - * @param path - * given path - * @param local - * whether the path is local or not - * @return a FileStatus object - * @throws IOException see specific implementation - */ - public FileStatus getFileStatus(String path, boolean local) throws IOException { - return getProxy().getFileStatus(path, local); - } - - /** - * Create a file with full permissions in a file system. - * @param path - source path where the file has to create. - * @param fileName - file name - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void createFile(String path, String fileName, - boolean local) throws IOException { - getProxy().createFile(path, fileName, null, local); - } - - /** - * Create a file with given permissions in a file system. - * @param path - source path where the file has to create. - * @param fileName - file name. - * @param permission - file permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void createFile(String path, String fileName, - FsPermission permission, boolean local) throws IOException { - getProxy().createFile(path, fileName, permission, local); - } - - /** - * Create a folder with default permissions in a file system. - * @param path - source path where the file has to be creating. - * @param folderName - folder name. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void createFolder(String path, String folderName, - boolean local) throws IOException { - getProxy().createFolder(path, folderName, null, local); - } - - /** - * Create a folder with given permissions in a file system. - * @param path - source path where the file has to be creating. - * @param folderName - folder name. - * @param permission - folder permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void createFolder(String path, String folderName, - FsPermission permission, boolean local) throws IOException { - getProxy().createFolder(path, folderName, permission, local); - } - - /** - * List the statuses of the files/directories in the given path if the path is - * a directory. 
- * - * @param path - * given path - * @param local - * whether the path is local or not - * @return the statuses of the files/directories in the given patch - * @throws IOException on RPC error. - */ - public FileStatus[] listStatus(String path, boolean local) - throws IOException { - return getProxy().listStatus(path, local); - } - - /** - * List the statuses of the files/directories in the given path if the path is - * a directory recursive/nonrecursively depending on parameters - * - * @param path - * given path - * @param local - * whether the path is local or not - * @param recursive - * whether to recursively get the status - * @return the statuses of the files/directories in the given patch - * @throws IOException is thrown on RPC error. - */ - public FileStatus[] listStatus(String path, boolean local, boolean recursive) - throws IOException { - List status = new ArrayList(); - addStatus(status, path, local, recursive); - return status.toArray(new FileStatus[0]); - } - - private void addStatus(List status, String f, - boolean local, boolean recursive) - throws IOException { - FileStatus[] fs = listStatus(f, local); - if (fs != null) { - for (FileStatus fileStatus : fs) { - if (!f.equals(fileStatus.getPath().toString())) { - status.add(fileStatus); - if (recursive) { - addStatus(status, fileStatus.getPath().toString(), local, recursive); - } - } - } - } - } - - /** - * Gets number of times FATAL log messages where logged in Daemon logs. - *
- * Pattern used for searching is FATAL.
- * @param excludeExpList list of exceptions to exclude - * @return number of occurrences of fatal messages. - * @throws IOException in case of communication errors - */ - public int getNumberOfFatalStatementsInLog(String [] excludeExpList) - throws IOException { - DaemonProtocol proxy = getProxy(); - String pattern = "FATAL"; - return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList); - } - - /** - * Gets the number of times ERROR log messages were logged in Daemon logs. - *
- * Pattern used for searching is ERROR.
- * @param excludeExpList list of exceptions to exclude - * @return number of occurrences of error messages. - * @throws IOException is thrown on RPC error. - */ - public int getNumberOfErrorStatementsInLog(String[] excludeExpList) - throws IOException { - DaemonProtocol proxy = getProxy(); - String pattern = "ERROR"; - return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList); - } - - /** - * Gets the number of times Warning log messages were logged in Daemon logs. - *
- * Pattern used for searching is WARN.
- * @param excludeExpList list of exceptions to exclude - * @return number of occurrences of warning messages. - * @throws IOException thrown on RPC error. - */ - public int getNumberOfWarnStatementsInLog(String[] excludeExpList) - throws IOException { - DaemonProtocol proxy = getProxy(); - String pattern = "WARN"; - return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList); - } - - /** - * Gets the number of times a given Exception was present in the log file.
- * - * @param e exception class. - * @param excludeExpList list of exceptions to exclude. - * @return number of exceptions in log - * @throws IOException is thrown on RPC error. - */ - public int getNumberOfExceptionsInLog(Exception e, - String[] excludeExpList) throws IOException { - DaemonProtocol proxy = getProxy(); - String pattern = e.getClass().getSimpleName(); - return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList); - } - - /** - * Number of times ConcurrentModificationException present in log file. - *
- * @param excludeExpList list of exceptions to exclude. - * @return number of times the exception appears in the log file. - * @throws IOException is thrown on RPC error. - */ - public int getNumberOfConcurrentModificationExceptionsInLog( - String[] excludeExpList) throws IOException { - return getNumberOfExceptionsInLog(new ConcurrentModificationException(), - excludeExpList); - } - - private int errorCount; - private int fatalCount; - private int concurrentExceptionCount; - - /** - * Populate the initial exception counts, which are used to assert, once a - * testcase is done, that no new exceptions were logged in the daemon while - * the testcase was running. - * @param excludeExpList list of exceptions to exclude - * @throws IOException is thrown on RPC error. - */ - protected void populateExceptionCount(String [] excludeExpList) - throws IOException { - errorCount = getNumberOfErrorStatementsInLog(excludeExpList); - LOG.info("Number of error messages in logs : " + errorCount); - fatalCount = getNumberOfFatalStatementsInLog(excludeExpList); - LOG.info("Number of fatal statements in logs : " + fatalCount); - concurrentExceptionCount = - getNumberOfConcurrentModificationExceptionsInLog(excludeExpList); - LOG.info("Number of concurrent modification exceptions in logs : " - + concurrentExceptionCount); - } - - /** - * Assert that no new exceptions were logged into the log file. - *
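For illustration only: since populateExceptionCount and assertNoExceptionsOccurred are protected, a same-package test (or a subclass) might bracket a test action with them roughly as follows. The exclude list, the 'client' variable and runScenarioUnderTest() are placeholders, not names from the patch.

    // Hypothetical usage of the log-counter helpers above; not part of the patch.
    String[] ignored = { "java.net.ConnectException" };   // placeholder exclude list
    client.populateExceptionCount(ignored);     // snapshot ERROR/FATAL/CME counts
    runScenarioUnderTest();                     // placeholder for the actual test steps
    client.assertNoExceptionsOccurred(ignored); // fails if new matching lines were logged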
- * - * Pre-req for the method is that populateExceptionCount() has - * to be called before calling this method. - * @param excludeExpList list of exceptions to exclude - * @throws IOException is thrown on RPC error. - */ - protected void assertNoExceptionsOccurred(String [] excludeExpList) - throws IOException { - int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList); - LOG.info("Number of error messages while asserting :" + newerrorCount); - int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList); - LOG.info("Number of fatal messages while asserting : " + newfatalCount); - int newconcurrentExceptionCount = - getNumberOfConcurrentModificationExceptionsInLog(excludeExpList); - LOG.info("Number of concurrentmodification exception while asserting :" - + newconcurrentExceptionCount); - Assert.assertEquals( - "New Error Messages logged in the log file", errorCount, newerrorCount); - Assert.assertEquals( - "New Fatal messages logged in the log file", fatalCount, newfatalCount); - Assert.assertEquals( - "New ConcurrentModificationException in log file", - concurrentExceptionCount, newconcurrentExceptionCount); - } - - /** - * Builds correct name of JMX object name from given domain, service name, type - * @param domain JMX domain name - * @param serviceName of the service where MBean is registered (NameNode) - * @param typeName of the MXBean class - * @return ObjectName for requested MXBean of null if one wasn't - * found - * @throws java.io.IOException in if object name is malformed - */ - protected ObjectName getJmxBeanName(String domain, String serviceName, - String typeName) throws IOException { - if (domain == null) - domain = HADOOP_JMX_DOMAIN; - - ObjectName jmxBean; - try { - jmxBean = new ObjectName(domain + ":service=" + serviceName + - ",name=" + typeName); - } catch (MalformedObjectNameException e) { - LOG.debug(e.getStackTrace()); - throw new IOException(e); - } - return jmxBean; - } - - /** - * Create connection with the remote JMX server at given host and port - * @param host name of the remote JMX server host - * @param port port number of the remote JXM server host - * @return instance of MBeanServerConnection or null if one - * hasn't been established - * @throws IOException in case of comminication errors - */ - protected MBeanServerConnection establishJmxConnection(String host, int port) - throws IOException { - if (connection != null) return connection; - String urlPattern = null; - try { - urlPattern = "service:jmx:rmi:///jndi/rmi://" + - host + ":" + port + - "/jmxrmi"; - JMXServiceURL url = new JMXServiceURL(urlPattern); - JMXConnector connector = JMXConnectorFactory.connect(url, null); - connection = connector.getMBeanServerConnection(); - } catch (java.net.MalformedURLException badURLExc) { - LOG.debug("bad url: " + urlPattern, badURLExc); - throw new IOException(badURLExc); - } - return connection; - } - - Hashtable jmxObjectNames = - new Hashtable(); - - /** - * Method implements all logic for receiving a bean's attribute. - * If any initializations such as establishing bean server connections, etc. - * are need it will do it. 
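As a hedged sketch of how a concrete client could build on this JMX helper: the service and MXBean names below are illustrative only, and getDaemonAttribute refers to the abstract method declared further down in this class.

    // Possible shape of a concrete client's getDaemonAttribute(); names are assumptions.
    @Override
    public Object getDaemonAttribute(String attributeName) throws IOException {
      // "NameNode" / "NameNodeInfo" are example service and MXBean names.
      return getJmxAttribute("NameNode", "NameNodeInfo", attributeName);
    }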
- * @param serviceName name of the service where MBean is registered (NameNode) - * @param type name of the MXBean class - * @param attributeName name of the attribute to be retrieved - * @return Object value of the attribute or null if not found - * @throws IOException is thrown in case of any errors - */ - protected Object getJmxAttribute (String serviceName, - String type, - String attributeName) - throws IOException { - Object retAttribute = null; - String domain = null; - if (isJmxEnabled()) { - try { - MBeanServerConnection conn = - establishJmxConnection(getHostName(), - getJmxPortNumber(HADOOP_OPTS_ENV)); - for (String d : conn.getDomains()) { - if (d != null && d.startsWith(HADOOP_JMX_DOMAIN)) - domain = d; - } - if (!jmxObjectNames.containsKey(type)) - jmxObjectNames.put(type, getJmxBeanName(domain, serviceName, type)); - retAttribute = - conn.getAttribute(jmxObjectNames.get(type), attributeName); - } catch (MBeanException e) { - LOG.debug(e.getStackTrace()); - throw new IOException(e); - } catch (AttributeNotFoundException e) { - LOG.warn(e.getStackTrace()); - throw new IOException(e); - } catch (InstanceNotFoundException e) { - LOG.warn(e.getStackTrace()); - throw new IOException(e); - } catch (ReflectionException e) { - LOG.debug(e.getStackTrace()); - throw new IOException(e); - } - } - return retAttribute; - } - - /** - * This method has to be implemented by appropriate concrete daemon client - * e.g. DNClient, NNClient, etc. - * Concrete implementation has to provide names of the service and bean type - * @param attributeName name of the attribute to be retrieved - * @return Object value of the given attribute - * @throws IOException is thrown in case of communication errors - */ - public abstract Object getDaemonAttribute (String attributeName) - throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java deleted file mode 100644 index b1277a0ea3a..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java +++ /dev/null @@ -1,537 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.test.system; - -import java.io.IOException; -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.DataInputStream; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Iterator; -import java.util.Enumeration; -import java.util.Arrays; -import java.util.Hashtable; -import java.net.URI; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.test.system.process.ClusterProcessManager; -import org.apache.hadoop.test.system.process.RemoteProcess; - -/** - * Abstract class which represent the cluster having multiple daemons. - */ -@SuppressWarnings("unchecked") -public abstract class AbstractDaemonCluster { - - private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class); - private String [] excludeExpList ; - private Configuration conf; - protected ClusterProcessManager clusterManager; - private Map, List> daemons = - new LinkedHashMap, List>(); - private String newConfDir = null; - private static final String CONF_HADOOP_LOCAL_DIR = - "test.system.hdrc.hadoop.local.confdir"; - private final static Object waitLock = new Object(); - - /** - * Constructor to create a cluster client.
- * - * @param conf - * Configuration to be used while constructing the cluster. - * @param rcluster - * process manger instance to be used for managing the daemons. - * - * @throws IOException - */ - public AbstractDaemonCluster(Configuration conf, - ClusterProcessManager rcluster) throws IOException { - this.conf = conf; - this.clusterManager = rcluster; - createAllClients(); - } - - /** - * The method returns the cluster manager. The system test cases require an - * instance of HadoopDaemonRemoteCluster to invoke certain operation on the - * daemon. - * - * @return instance of clusterManager - */ - public ClusterProcessManager getClusterManager() { - return clusterManager; - } - - protected void createAllClients() throws IOException { - for (RemoteProcess p : clusterManager.getAllProcesses()) { - List dms = daemons.get(p.getRole()); - if (dms == null) { - dms = new ArrayList(); - daemons.put(p.getRole(), dms); - } - dms.add(createClient(p)); - } - } - - /** - * Method to create the daemon client.
- * - * @param process - * to manage the daemon. - * @return instance of the daemon client - * - * @throws IOException - */ - protected abstract AbstractDaemonClient - createClient(RemoteProcess process) throws IOException; - - /** - * Get the global cluster configuration which was used to create the - * cluster.
- * - * @return global configuration of the cluster. - */ - public Configuration getConf() { - return conf; - } - - /** - * Return the client handle of all the Daemons.
- * - * @return map of role to daemon clients' list. - */ - public Map, List> getDaemons() { - return daemons; - } - - /** - * Checks if the cluster is ready for testing.
- * Algorithm for checking is as follows :
- * 1. Wait for Daemon to come up
- * 2. Check if daemon is ready
- * 3. If one of the daemons is not ready, return false
- * - * @return true if whole cluster is ready. - * - * @throws IOException - */ - public boolean isReady() throws IOException { - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - waitForDaemon(daemon); - if (!daemon.isReady()) { - return false; - } - } - } - return true; - } - - protected void waitForDaemon(AbstractDaemonClient d) { - final int TEN_SEC = 10000; - while(true) { - try { - LOG.info("Waiting for daemon at " + d.getHostName() + " to come up."); - LOG.info("Daemon might not be " + - "ready or the call to setReady() method hasn't been " + - "injected to " + d.getClass() + " "); - d.connect(); - break; - } catch (IOException e) { - try { - Thread.sleep(TEN_SEC); - } catch (InterruptedException ie) { - } - } - } - } - - /** - * Starts the cluster daemons. - * @throws IOException - */ - public void start() throws IOException { - clusterManager.start(); - } - - /** - * Stops the cluster daemons. - * @throws IOException - */ - public void stop() throws IOException { - clusterManager.stop(); - } - - /** - * Connect to daemon RPC ports. - * @throws IOException - */ - public void connect() throws IOException { - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - daemon.connect(); - } - } - } - - /** - * Disconnect to daemon RPC ports. - * @throws IOException - */ - public void disconnect() throws IOException { - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - daemon.disconnect(); - } - } - } - - /** - * Ping all the daemons of the cluster. - * @throws IOException - */ - public void ping() throws IOException { - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - LOG.info("Daemon is : " + daemon.getHostName() + " pinging...."); - daemon.ping(); - } - } - } - - /** - * Connect to the cluster and ensure that it is clean to run tests. - * @throws Exception - */ - public void setUp() throws Exception { - while (!isReady()) { - Thread.sleep(1000); - } - connect(); - ping(); - clearAllControlActions(); - ensureClean(); - populateExceptionCounts(); - } - - /** - * This is mainly used for the test cases to set the list of exceptions - * that will be excluded. - * @param excludeExpList list of exceptions to exclude - */ - public void setExcludeExpList(String [] excludeExpList) { - this.excludeExpList = excludeExpList; - } - - public void clearAllControlActions() throws IOException { - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - LOG.info("Daemon is : " + daemon.getHostName() + " pinging...."); - daemon.getProxy().clearActions(); - } - } - } - - /** - * Ensure that the cluster is clean to run tests. - * @throws IOException - */ - public void ensureClean() throws IOException { - } - - /** - * Ensure that cluster is clean. Disconnect from the RPC ports of the daemons. - * @throws IOException - */ - public void tearDown() throws IOException { - ensureClean(); - clearAllControlActions(); - assertNoExceptionMessages(); - disconnect(); - } - - /** - * Populate the exception counts in all the daemons so that it can be checked when - * the testcase has finished running.
- * @throws IOException - */ - protected void populateExceptionCounts() throws IOException { - for(List lst : daemons.values()) { - for(AbstractDaemonClient d : lst) { - d.populateExceptionCount(excludeExpList); - } - } - } - - /** - * Assert no exception has been thrown during the sequence of the actions. - *
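For illustration, a typical harness around a concrete AbstractDaemonCluster could use the lifecycle methods above like this; 'cluster' and runClusterScenario() are placeholders for a concrete cluster instance and the actual test body.

    // Hypothetical test skeleton; not part of the patch.
    cluster.setUp();          // waits for readiness, connects, pings, clears actions, snapshots log counters
    try {
      runClusterScenario();   // placeholder for the real test steps
    } finally {
      cluster.tearDown();     // re-checks the log counters and disconnects
    }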
- * @throws IOException - */ - protected void assertNoExceptionMessages() throws IOException { - for(List lst : daemons.values()) { - for(AbstractDaemonClient d : lst) { - d.assertNoExceptionsOccurred(excludeExpList); - } - } - } - - /** - * Get the proxy user definitions from cluster from configuration. - * @return ProxyUserDefinitions - proxy users data like groups and hosts. - * @throws Exception - if no proxy users found in config. - */ - public ProxyUserDefinitions getHadoopProxyUsers() throws - Exception { - Iterator itr = conf.iterator(); - ArrayList proxyUsers = new ArrayList(); - while (itr.hasNext()) { - if (itr.next().toString().indexOf("hadoop.proxyuser") >= 0 && - itr.next().toString().indexOf("groups=") >= 0) { - proxyUsers.add(itr.next().toString().split("\\.")[2]); - } - } - if (proxyUsers.size() == 0) { - LOG.error("No proxy users found in the configuration."); - throw new Exception("No proxy users found in the configuration."); - } - - ProxyUserDefinitions pud = new ProxyUserDefinitions() { - @Override - public boolean writeToFile(URI filePath) throws IOException { - throw new UnsupportedOperationException("No such method exists."); - }; - }; - - for (String userName : proxyUsers) { - List groups = Arrays.asList(conf.get("hadoop.proxyuser." + - userName + ".groups").split("//,")); - List hosts = Arrays.asList(conf.get("hadoop.proxyuser." + - userName + ".hosts").split("//,")); - ProxyUserDefinitions.GroupsAndHost definitions = - pud.new GroupsAndHost(); - definitions.setGroups(groups); - definitions.setHosts(hosts); - pud.addProxyUser(userName, definitions); - } - return pud; - } - - /** - * It's a local folder where the config file stores temporarily - * while serializing the object. - * @return String temporary local folder path for configuration. - */ - private String getHadoopLocalConfDir() { - String hadoopLocalConfDir = conf.get(CONF_HADOOP_LOCAL_DIR); - if (hadoopLocalConfDir == null || hadoopLocalConfDir.isEmpty()) { - LOG.error("No configuration " - + "for the CONF_HADOOP_LOCAL_DIR passed"); - throw new IllegalArgumentException( - "No Configuration passed for hadoop conf local directory"); - } - return hadoopLocalConfDir; - } - - /** - * It uses to restart the cluster with new configuration at runtime.
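For illustration, restartClusterWithNewConfig and restart are typically used as a pair, as sketched below. The property name, the file name and the Hashtable type parameters are assumptions, since the generic signature is not visible in this listing.

    // Hypothetical usage: restart with one overridden property, then restore the defaults.
    Hashtable<String, String> props = new Hashtable<String, String>();
    props.put("dfs.heartbeat.interval", "1");           // example property override
    cluster.restartClusterWithNewConfig(props, "hdfs-site.xml");
    try {
      // ... exercise the cluster under the overridden setting ...
    } finally {
      cluster.restart();      // bring the cluster back up with the original configuration
    }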
- * @param props attributes for new configuration. - * @param configFile configuration file. - * @throws IOException if an I/O error occurs. - */ - public void restartClusterWithNewConfig(Hashtable props, - String configFile) throws IOException { - - String mapredConf = null; - String localDirPath = null; - File localFolderObj = null; - File xmlFileObj = null; - String confXMLFile = null; - Configuration initConf = new Configuration(getConf()); - Enumeration e = props.keys(); - while (e.hasMoreElements()) { - String propKey = e.nextElement(); - Object propValue = props.get(propKey); - initConf.set(propKey,propValue.toString()); - } - - localDirPath = getHadoopLocalConfDir(); - localFolderObj = new File(localDirPath); - if (!localFolderObj.exists()) { - localFolderObj.mkdir(); - } - confXMLFile = localDirPath + File.separator + configFile; - xmlFileObj = new File(confXMLFile); - initConf.writeXml(new FileOutputStream(xmlFileObj)); - newConfDir = clusterManager.pushConfig(localDirPath); - stop(); - waitForClusterToStop(); - clusterManager.start(newConfDir); - waitForClusterToStart(); - localFolderObj.delete(); - } - - /** - * It uses to restart the cluster with default configuration.
- * @throws IOException if an I/O error occurs. - */ - public void restart() throws - IOException { - stop(); - waitForClusterToStop(); - start(); - waitForClusterToStart(); - cleanupNewConf(newConfDir); - } - - /** - * Deletes the new configuration folder. - * @param path - configuration directory path. - * @throws IOException if an I/O error occurs. - */ - public void cleanupNewConf(String path) throws IOException { - File file = new File(path); - file.delete(); - } - - /** - * Waits until the cluster is stopped.
- * @throws IOException if an I/O error occurs. - */ - public void waitForClusterToStop() throws - IOException { - List chkDaemonStop = new ArrayList(); - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - DaemonStopThread dmStop = new DaemonStopThread(daemon); - chkDaemonStop.add(dmStop); - dmStop.start(); - } - } - - for (Thread daemonThread : chkDaemonStop){ - try { - daemonThread.join(); - } catch(InterruptedException intExp) { - LOG.warn("Interrupted while thread is joining." + intExp.getMessage()); - } - } - } - - /** - * It uses to wait until the cluster is started.
- * @throws IOException if an I/O error occurs. - */ - public void waitForClusterToStart() throws - IOException { - List chkDaemonStart = new ArrayList(); - for (List set : daemons.values()) { - for (AbstractDaemonClient daemon : set) { - DaemonStartThread dmStart = new DaemonStartThread(daemon); - chkDaemonStart.add(dmStart);; - dmStart.start(); - } - } - - for (Thread daemonThread : chkDaemonStart){ - try { - daemonThread.join(); - } catch(InterruptedException intExp) { - LOG.warn("Interrupted while thread is joining" + intExp.getMessage()); - } - } - } - - /** - * It waits for specified amount of time. - * @param duration time in milliseconds. - * @throws InterruptedException if any thread interrupted the current - * thread while it is waiting for a notification. - */ - public void waitFor(long duration) { - try { - synchronized (waitLock) { - waitLock.wait(duration); - } - } catch (InterruptedException intExp) { - LOG.warn("Interrrupeted while thread is waiting" + intExp.getMessage()); - } - } - - class DaemonStartThread extends Thread { - private AbstractDaemonClient daemon; - - public DaemonStartThread(AbstractDaemonClient daemon) { - this.daemon = daemon; - } - - public void run(){ - LOG.info("Waiting for Daemon " + daemon.getHostName() - + " to come up....."); - while (true) { - try { - daemon.ping(); - LOG.info("Daemon is : " + daemon.getHostName() + " pinging..."); - break; - } catch (Exception exp) { - if(LOG.isDebugEnabled()) { - LOG.debug(daemon.getHostName() + " is waiting to come up."); - } - waitFor(60000); - } - } - } - } - - class DaemonStopThread extends Thread { - private AbstractDaemonClient daemon; - - public DaemonStopThread(AbstractDaemonClient daemon) { - this.daemon = daemon; - } - - public void run() { - LOG.info("Waiting for Daemon " + daemon.getHostName() - + " to stop....."); - while (true) { - try { - daemon.ping(); - if(LOG.isDebugEnabled()) { - LOG.debug(daemon.getHostName() +" is waiting state to stop."); - } - waitFor(60000); - } catch (Exception exp) { - LOG.info("Daemon is : " + daemon.getHostName() + " stopped..."); - break; - } - } - } - } -} - diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java deleted file mode 100644 index de1b7998e67..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.test.system; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.io.Writable; - -/** - * Class to represent a control action which can be performed on Daemon.
- * - */ - -public abstract class ControlAction implements Writable { - - private T target; - - /** - * Default constructor of the Control Action, sets the Action type to zero.
- */ - public ControlAction() { - } - - /** - * Constructor which sets the type of the Control action to a specific type.
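For illustration, a daemon-specific action could subclass this type roughly as follows. The class name and the Text payload are the editor's assumptions, and the generic parameter is inferred from the Writable target; real Herriot actions are defined per daemon.

    // Hypothetical concrete action carrying a task id as its Writable target.
    public class SuspendTaskAction extends ControlAction<org.apache.hadoop.io.Text> {
      public SuspendTaskAction() {                      // non-null target needed for deserialization
        super(new org.apache.hadoop.io.Text());
      }
      public SuspendTaskAction(String taskId) {
        super(new org.apache.hadoop.io.Text(taskId));
      }
    }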
- * - * @param target - * of the control action. - */ - public ControlAction(T target) { - this.target = target; - } - - /** - * Gets the id of the control action
- * - * @return target of action - */ - public T getTarget() { - return target; - } - - @Override - public void readFields(DataInput in) throws IOException { - target.readFields(in); - } - - @Override - public void write(DataOutput out) throws IOException { - target.write(out); - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof ControlAction) { - ControlAction other = (ControlAction) obj; - return (this.target.equals(other.getTarget())); - } else { - return false; - } - } - - - @Override - public String toString() { - return "Action Target : " + this.target; - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java deleted file mode 100644 index 6cdccc3b113..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system; - -import java.io.FileNotFoundException; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.ipc.VersionedProtocol; -import org.apache.hadoop.fs.permission.FsPermission; - -/** - * RPC interface of a given Daemon. - */ -public interface DaemonProtocol extends VersionedProtocol{ - long versionID = 1L; - - /** - * Returns the Daemon configuration. - * @return Configuration - * @throws IOException in case of errors - */ - Configuration getDaemonConf() throws IOException; - - /** - * Check if the Daemon is alive. - * - * @throws IOException - * if Daemon is unreachable. - */ - void ping() throws IOException; - - /** - * Check if the Daemon is ready to accept RPC connections. - * - * @return true if Daemon is ready to accept RPC connection. - * @throws IOException in case of errors - */ - boolean isReady() throws IOException; - - /** - * Get system level view of the Daemon process. - * - * @return returns system level view of the Daemon process. - * - * @throws IOException in case of errors - */ - ProcessInfo getProcessInfo() throws IOException; - - /** - * Return a file status object that represents the path. - * @param path - * given path - * @param local - * whether the path is local or not - * @return a FileStatus object - * @throws FileNotFoundException when the path does not exist; - * IOException see specific implementation - */ - FileStatus getFileStatus(String path, boolean local) throws IOException; - - /** - * Create a file with given permissions in a file system. 
- * @param path - source path where the file has to create. - * @param fileName - file name. - * @param permission - file permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - void createFile(String path, String fileName, - FsPermission permission, boolean local) throws IOException; - - /** - * Create a folder with given permissions in a file system. - * @param path - source path where the file has to be creating. - * @param folderName - folder name. - * @param permission - folder permissions. - * @param local - identifying the path whether its local or not. - * @throws IOException - if an I/O error occurs. - */ - public void createFolder(String path, String folderName, - FsPermission permission, boolean local) throws IOException; - /** - * List the statuses of the files/directories in the given path if the path is - * a directory. - * - * @param path - * given path - * @param local - * whether the path is local or not - * @return the statuses of the files/directories in the given patch - * @throws IOException in case of errors - */ - FileStatus[] listStatus(String path, boolean local) throws IOException; - - /** - * Enables a particular control action to be performed on the Daemon
- * - * @param action is a control action to be enabled. - * - * @throws IOException in case of errors - */ - @SuppressWarnings("unchecked") - void sendAction(ControlAction action) throws IOException; - - /** - * Checks if the particular control action has be delivered to the Daemon - * component
- * - * @param action to be checked. - * - * @return true if action is still in waiting queue of - * actions to be delivered. - * @throws IOException in case of errors - */ - @SuppressWarnings("unchecked") - boolean isActionPending(ControlAction action) throws IOException; - - /** - * Removes a particular control action from the list of the actions which the - * daemon maintains.
- * Not to be directly called by Test Case or clients. - * @param action to be removed - * @throws IOException in case of errors - */ - - @SuppressWarnings("unchecked") - void removeAction(ControlAction action) throws IOException; - - /** - * Clears out the list of control actions on the particular daemon. - *
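For illustration, the action-related calls in this interface are typically used together as sketched below; the helper name and the polling interval are arbitrary choices, not part of the patch.

    // Hypothetical helper: deliver an action and wait until the daemon consumes it.
    static void deliverAndAwait(DaemonProtocol proxy, ControlAction action)
        throws IOException, InterruptedException {
      proxy.sendAction(action);                 // enqueue on the remote daemon
      while (proxy.isActionPending(action)) {   // still waiting to be delivered
        Thread.sleep(500);
      }
    }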
- * @throws IOException in case of errors - */ - void clearActions() throws IOException; - - /** - * Gets a list of pending actions which are targeted on the specified key. - *
- * Not to be directly used by clients. - * @param key target - * @return list of actions. - * @throws IOException in case of errors - */ - @SuppressWarnings("unchecked") - ControlAction[] getActions(Writable key) throws IOException; - - /** - * Gets the number of times a particular pattern has been found in the - * daemon's log file.
- * Please note that the search spans all previous messages of the - * Daemon, so the better practice is to get the previous counts before an operation - * and then re-check whether the sequence of actions has caused any problems. - * @param pattern to look for in the daemon's log file - * @param list of exceptions to ignore - * @return number of times the pattern is found in the log file. - * @throws IOException in case of errors - */ - int getNumberOfMatchesInLogFile(String pattern, String[] list) - throws IOException; - - /** - * Gets the user who started the particular daemon initially.
- * - * @return user who started the particular daemon. - * @throws IOException in case of errors - */ - String getDaemonUser() throws IOException; - - /** - * It uses for suspending the process. - * @param pid process id. - * @return true if the process is suspended otherwise false. - * @throws IOException if an I/O error occurs. - */ - boolean suspendProcess(String pid) throws IOException; - - /** - * It uses for resuming the suspended process. - * @param pid process id - * @return true if suspended process is resumed otherwise false. - * @throws IOException if an I/O error occurs. - */ - boolean resumeProcess(String pid) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java deleted file mode 100644 index 22b385529f1..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system; - -import java.util.Map; - -import org.apache.hadoop.io.Writable; - -/** - * Daemon system level process information. - */ -public interface ProcessInfo extends Writable { - /** - * Get the current time in the millisecond.
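For illustration, the view returned by a client's getProcessInfo() can be inspected as sketched below; 'client' stands for any concrete AbstractDaemonClient, and the environment variable name follows the HADOOP_OPTS convention used elsewhere in this patch.

    // Hypothetical check on a daemon's runtime state via its ProcessInfo.
    ProcessInfo info = client.getProcessInfo();
    System.out.println("daemon clock: " + info.currentTimeMillis());
    System.out.println("max heap    : " + info.maxMemory());
    System.out.println("HADOOP_OPTS : " + info.getEnv().get("HADOOP_OPTS"));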
- * - * @return current time on daemon clock in millisecond. - */ - public long currentTimeMillis(); - - /** - * Get the environment that was used to start the Daemon process.
- * - * @return the environment variable list. - */ - public Map getEnv(); - - /** - * Get the System properties of the Daemon process.
- * - * @return the properties list. - */ - public Map getSystemProperties(); - - /** - * Get the number of active threads in Daemon VM.
- * - * @return number of active threads in Daemon VM. - */ - public int activeThreadCount(); - - /** - * Get the maximum heap size that is configured for the Daemon VM.
- * - * @return maximum heap size. - */ - public long maxMemory(); - - /** - * Get the free memory in Daemon VM.
- * - * @return free memory. - */ - public long freeMemory(); - - /** - * Get the total used memory in Daemon VM.
- * - * @return total used memory. - */ - public long totalMemory(); -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java deleted file mode 100644 index c32666dc45a..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - - -public class ProcessInfoImpl implements ProcessInfo { - - private int threadCount; - private long currentTime; - private long freemem; - private long maxmem; - private long totmem; - private Map env; - private Map props; - - public ProcessInfoImpl() { - env = new HashMap(); - props = new HashMap(); - } - - /** - * Construct a concrete process information object.
- * - * @param threadCount - * count of threads. - * @param currentTime - * @param freemem - * @param maxmem - * @param totmem - * @param env environment list. - * @param props - */ - public ProcessInfoImpl(int threadCount, long currentTime, long freemem, - long maxmem, long totmem, Map env, - Map props) { - this.threadCount = threadCount; - this.currentTime = currentTime; - this.freemem = freemem; - this.maxmem = maxmem; - this.totmem = totmem; - this.env = env; - this.props = props; - } - - @Override - public int activeThreadCount() { - return threadCount; - } - - @Override - public long currentTimeMillis() { - return currentTime; - } - - @Override - public long freeMemory() { - return freemem; - } - - @Override - public Map getEnv() { - return env; - } - - @Override - public Map getSystemProperties() { - return props; - } - - @Override - public long maxMemory() { - return maxmem; - } - - @Override - public long totalMemory() { - return totmem; - } - - @Override - public void readFields(DataInput in) throws IOException { - this.threadCount = in.readInt(); - this.currentTime = in.readLong(); - this.freemem = in.readLong(); - this.maxmem = in.readLong(); - this.totmem = in.readLong(); - read(in, env); - read(in, props); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeInt(threadCount); - out.writeLong(currentTime); - out.writeLong(freemem); - out.writeLong(maxmem); - out.writeLong(totmem); - write(out, env); - write(out, props); - } - - private void read(DataInput in, Map map) throws IOException { - int size = in.readInt(); - for (int i = 0; i < size; i = i + 2) { - String key = in.readUTF(); - String value = in.readUTF(); - map.put(key, value); - } - } - - private void write(DataOutput out, Map map) - throws IOException { - int size = (map.size() * 2); - out.writeInt(size); - for (Map.Entry entry : map.entrySet()) { - out.writeUTF(entry.getKey()); - out.writeUTF(entry.getValue()); - } - } - - @Override - public String toString() { - StringBuffer strBuf = new StringBuffer(); - strBuf.append(String.format("active threads : %d\n", threadCount)); - strBuf.append(String.format("current time : %d\n", currentTime)); - strBuf.append(String.format("free memory : %d\n", freemem)); - strBuf.append(String.format("total memory : %d\n", totmem)); - strBuf.append(String.format("max memory : %d\n", maxmem)); - strBuf.append("Environment Variables : \n"); - for (Map.Entry entry : env.entrySet()) { - strBuf.append(String.format("key : %s value : %s \n", entry.getKey(), - entry.getValue())); - } - return strBuf.toString(); - } - -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java deleted file mode 100644 index c9d6be441e0..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.test.system; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.io.IOException; -import java.net.URI; - -/** - * Its the data container which contains host names and - * groups against each proxy user. - */ -public abstract class ProxyUserDefinitions { - - /** - * Groups and host names container - */ - public class GroupsAndHost { - private List groups; - private List hosts; - public List getGroups() { - return groups; - } - public void setGroups(List groups) { - this.groups = groups; - } - public List getHosts() { - return hosts; - } - public void setHosts(List hosts) { - this.hosts = hosts; - } - } - - protected Map proxyUsers; - protected ProxyUserDefinitions () { - proxyUsers = new HashMap(); - } - - /** - * Add proxy user data to a container. - * @param userName - proxy user name. - * @param definitions - groups and host names. - */ - public void addProxyUser (String userName, GroupsAndHost definitions) { - proxyUsers.put(userName, definitions); - } - - /** - * Get the host names and groups against given proxy user. - * @return - GroupsAndHost object. - */ - public GroupsAndHost getProxyUser (String userName) { - return proxyUsers.get(userName); - } - - /** - * Get the Proxy users data which contains the host names - * and groups against each user. - * @return - the proxy users data as hash map. - */ - public Map getProxyUsers () { - return proxyUsers; - } - - /** - * The implementation of this method has to be provided by a child of the class - * @param filePath - * @return - * @throws IOException - */ - public abstract boolean writeToFile(URI filePath) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java deleted file mode 100644 index 70dd4146157..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.test.system.process; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import org.apache.hadoop.conf.Configuration; - -/** - * Interface to manage the remote processes in the cluster. - */ -public interface ClusterProcessManager { - - /** - * Initialization method to pass the configuration object which is required - * by the ClusterProcessManager to manage the cluster.
- * Configuration object should typically contain all the parameters which are - * required by the implementations.
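For illustration, a driver might use an implementation of this interface as follows; the 'manager' instance is assumed to be some concrete ClusterProcessManager, and the sequencing shown is only a sketch.

    // Hypothetical driver around a ClusterProcessManager implementation.
    Configuration conf = new Configuration();   // carries the test.system.* keys
    manager.init(conf);                         // required before any other call
    manager.start();                            // start every remote daemon
    try {
      // ... run system tests against the live cluster ...
    } finally {
      manager.stop();                           // shut the remote daemons down
    }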
- * - * @param conf configuration containing values of the specific keys which - * are required by the implementation of the cluster process manager. - * - * @throws IOException when initialization fails. - */ - void init(Configuration conf) throws IOException; - - /** - * Get the list of RemoteProcess handles of all the remote processes. - */ - List getAllProcesses(); - - /** - * Get all the roles this cluster's daemon processes have. - */ - Set> getRoles(); - - /** - * Method to start all the remote daemons.
- * - * @throws IOException if startup procedure fails. - */ - void start() throws IOException; - - /** - * Starts the daemons from a user-specified conf dir. - * @param newConfLocation the dir where the new conf files reside. - * @throws IOException if start from the new conf fails. - */ - void start(String newConfLocation) throws IOException; - - /** - * Stops the daemons running from a user-specified conf dir. - * - * @param newConfLocation the dir where the new conf files reside. - * @throws IOException if stop from the new conf fails. - */ - void stop(String newConfLocation) throws IOException; - - /** - * Method to shut down all the remote daemons.<br/>
- * - * @throws IOException if shutdown procedure fails. - */ - void stop() throws IOException; - - /** - * Returns whether multi-user support is enabled for this cluster. - *<br/>
- * @return true if multi-user support is enabled. - * @throws IOException if RPC returns error. - */ - boolean isMultiUserSupported() throws IOException; - - /** - * The pushConfig is used to push a new config to the daemons. - * @param localDir - * @return is the remoteDir location where config will be pushed - * @throws IOException if pushConfig fails. - */ - String pushConfig(String localDir) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java deleted file mode 100644 index d3e5d630a37..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java +++ /dev/null @@ -1,404 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system.process; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.IOException; -import java.net.InetAddress; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; - -/** - * The concrete class which implements the start up and shut down based routines - * based on the hadoop-daemon.sh.
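For orientation, the following is a minimal, hypothetical sketch of how a test driver could exercise the ClusterProcessManager contract defined above; the implementation class name, paths, and configuration values are placeholders and not part of the original sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.system.process.ClusterProcessManager;

public class ClusterDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Keys consumed by the remote-cluster implementation; the values are placeholders.
    conf.set("test.system.hdrc.hadoophome", "/opt/hadoop");
    conf.set("test.system.hdrc.hadoopconfdir", "/opt/hadoop/conf");
    conf.set("test.system.hdrc.hadoopnewconfdir", "/tmp/herriot-newconf");

    // Load some concrete ClusterProcessManager; the class name here is hypothetical.
    ClusterProcessManager mgr = Class.forName("org.example.MyProcessManager")
        .asSubclass(ClusterProcessManager.class).newInstance();

    mgr.init(conf);                                      // read the keys set above
    String remoteDir = mgr.pushConfig("/tmp/localconf"); // stage a new config on every host
    mgr.start(remoteDir);                                // start daemons with the pushed config
    mgr.stop(remoteDir);                                 // and stop them again
  }
}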
- * - * The class requires two keys to be present in the Configuration object passed to - * it; see CONF_HADOOPHOME and - * CONF_HADOOPCONFDIR for the names of the - * configuration keys. - * - * The final command executed over ssh has the following format: - *<br/>
- * - * ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName - * --config HADOOP_CONF_DIR (start|stop) command' - * - */ -public abstract class HadoopDaemonRemoteCluster - implements ClusterProcessManager { - - private static final Log LOG = LogFactory - .getLog(HadoopDaemonRemoteCluster.class.getName()); - - public static final String CONF_HADOOPNEWCONFDIR = - "test.system.hdrc.hadoopnewconfdir"; - /** - * Key used to configure the HADOOP_PREFIX to be used by the - * HadoopDaemonRemoteCluster. - */ - public final static String CONF_HADOOPHOME = - "test.system.hdrc.hadoophome"; - - public final static String CONF_SCRIPTDIR = - "test.system.hdrc.deployed.scripts.dir"; - /** - * Key used to configure the HADOOP_CONF_DIR to be used by the - * HadoopDaemonRemoteCluster. - */ - public final static String CONF_HADOOPCONFDIR = - "test.system.hdrc.hadoopconfdir"; - - public final static String CONF_DEPLOYED_HADOOPCONFDIR = - "test.system.hdrc.deployed.hadoopconfdir"; - - private String hadoopHome; - protected String hadoopConfDir; - protected String scriptsDir; - protected String hadoopNewConfDir; - private final Set> roles; - private final List daemonInfos; - private List processes; - protected Configuration conf; - - public static class HadoopDaemonInfo { - public final String cmd; - public final Enum role; - public final List hostNames; - public HadoopDaemonInfo(String cmd, Enum role, List hostNames) { - super(); - this.cmd = cmd; - this.role = role; - this.hostNames = hostNames; - } - - public HadoopDaemonInfo(String cmd, Enum role, String hostFile) - throws IOException { - super(); - this.cmd = cmd; - this.role = role; - File file = new File(getDeployedHadoopConfDir(), hostFile); - BufferedReader reader = null; - hostNames = new ArrayList(); - try { - reader = new BufferedReader(new FileReader(file)); - String host = null; - while ((host = reader.readLine()) != null) { - if (host.trim().isEmpty() || host.startsWith("#")) { - // Skip empty and possible comment lines - // throw new IllegalArgumentException( - // "Hostname could not be found in file " + hostFile); - continue; - } - hostNames.add(host.trim()); - } - if (hostNames.size() < 1) { - throw new IllegalArgumentException("At least one hostname " - + - "is required to be present in file - " + hostFile); - } - } finally { - try { - reader.close(); - } catch (IOException e) { - LOG.warn("Could not close reader"); - } - } - LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " - + hostFile); - } - } - - @Override - public String pushConfig(String localDir) throws IOException { - for (RemoteProcess process : processes){ - process.pushConfig(localDir); - } - return hadoopNewConfDir; - } - - public HadoopDaemonRemoteCluster(List daemonInfos) { - this.daemonInfos = daemonInfos; - this.roles = new HashSet>(); - for (HadoopDaemonInfo info : daemonInfos) { - this.roles.add(info.role); - } - } - - @Override - public void init(Configuration conf) throws IOException { - this.conf = conf; - populateDirectories(conf); - this.processes = new ArrayList(); - populateDaemons(); - } - - @Override - public List getAllProcesses() { - return processes; - } - - @Override - public Set> getRoles() { - return roles; - } - - /** - * Method to populate the hadoop home and hadoop configuration directories. 
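To illustrate the wiring described above, a concrete subclass might supply its daemon host lists roughly as follows; the class name, the Role enum, and the host names are invented for the example.

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;

public class ExampleRemoteCluster extends HadoopDaemonRemoteCluster {
  enum Role { NN, DN } // roles are arbitrary enums chosen by the subclass

  public ExampleRemoteCluster() throws IOException {
    super(Arrays.asList(
        // explicit host list for the single namenode
        new HadoopDaemonInfo("namenode", Role.NN, Arrays.asList("nn.example.com")),
        // datanode hosts are read from a host file under the deployed conf dir
        new HadoopDaemonInfo("datanode", Role.DN, "slaves")));
  }
}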
- * - * @param conf - * Configuration object containing values for - * CONF_HADOOPHOME and - * CONF_HADOOPCONFDIR - * - * @throws IllegalArgumentException - * if the configuration or system property set does not contain - * values for the required keys. - */ - protected void populateDirectories(Configuration conf) { - hadoopHome = conf.get(CONF_HADOOPHOME); - hadoopConfDir = conf.get(CONF_HADOOPCONFDIR); - scriptsDir = conf.get(CONF_SCRIPTDIR); - hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR); - if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty() - || hadoopConfDir.isEmpty()) { - LOG.error("No configuration " - + "for the HADOOP_PREFIX and HADOOP_CONF_DIR passed"); - throw new IllegalArgumentException( - "No Configuration passed for hadoop home " + - "and hadoop conf directories"); - } - } - - public static String getDeployedHadoopConfDir() { - String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR); - if (dir == null || dir.isEmpty()) { - LOG.error("No configuration " - + "for the CONF_DEPLOYED_HADOOPCONFDIR passed"); - throw new IllegalArgumentException( - "No Configuration passed for hadoop deployed conf directory"); - } - return dir; - } - - @Override - public void start() throws IOException { - for (RemoteProcess process : processes) { - process.start(); - } - } - - @Override - public void start(String newConfLocation)throws IOException { - for (RemoteProcess process : processes) { - process.start(newConfLocation); - } - } - - @Override - public void stop() throws IOException { - for (RemoteProcess process : processes) { - process.kill(); - } - } - - @Override - public void stop(String newConfLocation) throws IOException { - for (RemoteProcess process : processes) { - process.kill(newConfLocation); - } - } - - protected void populateDaemon(HadoopDaemonInfo info) throws IOException { - for (String host : info.hostNames) { - InetAddress addr = InetAddress.getByName(host); - RemoteProcess process = getProcessManager(info, - addr.getCanonicalHostName()); - processes.add(process); - } - } - - protected void populateDaemons() throws IOException { - for (HadoopDaemonInfo info : daemonInfos) { - populateDaemon(info); - } - } - - @Override - public boolean isMultiUserSupported() throws IOException { - return false; - } - - protected RemoteProcess getProcessManager( - HadoopDaemonInfo info, String hostName) { - RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role); - return process; - } - - /** - * The core daemon class which actually implements the remote process - * management of actual daemon processes in the cluster. 
- * - */ - class ScriptDaemon implements RemoteProcess { - - private static final String STOP_COMMAND = "stop"; - private static final String START_COMMAND = "start"; - private static final String SCRIPT_NAME = "hadoop-daemon.sh"; - private static final String PUSH_CONFIG ="pushConfig.sh"; - protected final String daemonName; - protected final String hostName; - private final Enum role; - - public ScriptDaemon(String daemonName, String hostName, Enum role) { - this.daemonName = daemonName; - this.hostName = hostName; - this.role = role; - } - - @Override - public String getHostName() { - return hostName; - } - - private String[] getPushConfigCommand(String localDir, String remoteDir, - File scriptDir) throws IOException{ - ArrayList cmdArgs = new ArrayList(); - cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG); - cmdArgs.add(localDir); - cmdArgs.add(hostName); - cmdArgs.add(remoteDir); - cmdArgs.add(hadoopConfDir); - return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]); - } - - private ShellCommandExecutor buildPushConfig(String local, String remote ) - throws IOException { - File scriptDir = new File(scriptsDir); - String[] commandArgs = getPushConfigCommand(local, remote, scriptDir); - HashMap env = new HashMap(); - ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs, - scriptDir, env); - LOG.info(executor.toString()); - return executor; - } - - private ShellCommandExecutor createNewConfDir() throws IOException { - ArrayList cmdArgs = new ArrayList(); - cmdArgs.add("ssh"); - cmdArgs.add(hostName); - cmdArgs.add("if [ -d "+ hadoopNewConfDir+ - " ];\n then echo Will remove existing directory; rm -rf "+ - hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+ - "echo " + hadoopNewConfDir + " doesnt exist hence creating" + - "; mkdir " + hadoopNewConfDir + ";\n fi"); - String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]); - ShellCommandExecutor executor = new ShellCommandExecutor(cmd); - LOG.info(executor.toString()); - return executor; - } - - @Override - public void pushConfig(String localDir) throws IOException { - createNewConfDir().execute(); - buildPushConfig(localDir, hadoopNewConfDir).execute(); - } - - private ShellCommandExecutor buildCommandExecutor(String command, - String confDir) { - String[] commandArgs = getCommand(command, confDir); - File cwd = new File("."); - HashMap env = new HashMap(); - env.put("HADOOP_CONF_DIR", confDir); - ShellCommandExecutor executor - = new ShellCommandExecutor(commandArgs, cwd, env); - LOG.info(executor.toString()); - return executor; - } - - private File getBinDir() { - File binDir = new File(hadoopHome, "bin"); - return binDir; - } - - protected String[] getCommand(String command, String confDir) { - ArrayList cmdArgs = new ArrayList(); - File binDir = getBinDir(); - cmdArgs.add("ssh"); - cmdArgs.add(hostName); - cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME); - cmdArgs.add("--config"); - cmdArgs.add(confDir); - // XXX Twenty internal version does not support --script option. 
- cmdArgs.add(command); - cmdArgs.add(daemonName); - return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]); - } - - @Override - public void kill() throws IOException { - kill(hadoopConfDir); - } - - @Override - public void start() throws IOException { - start(hadoopConfDir); - } - - public void start(String newConfLocation) throws IOException { - ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND, - newConfLocation); - cme.execute(); - String output = cme.getOutput(); - if (!output.isEmpty()) { //getOutput() never returns null value - if (output.toLowerCase().contains("error")) { - LOG.warn("Error is detected."); - throw new IOException("Start error\n" + output); - } - } - } - - public void kill(String newConfLocation) throws IOException { - ShellCommandExecutor cme - = buildCommandExecutor(STOP_COMMAND, newConfLocation); - cme.execute(); - String output = cme.getOutput(); - if (!output.isEmpty()) { //getOutput() never returns null value - if (output.toLowerCase().contains("error")) { - LOG.info("Error is detected."); - throw new IOException("Kill error\n" + output); - } - } - } - - @Override - public Enum getRole() { - return role; - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java deleted file mode 100644 index 2f9e215ffde..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version - * 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - * implied. See the License for the specific language governing - * permissions and limitations under the License. 
- */ -package org.apache.hadoop.test.system.process; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo; - -public abstract class MultiUserHadoopDaemonRemoteCluster - extends HadoopDaemonRemoteCluster { - - public MultiUserHadoopDaemonRemoteCluster(List daemonInfos) { - super(daemonInfos); - } - - @Override - protected RemoteProcess getProcessManager( - HadoopDaemonInfo info, String hostName) { - return new MultiUserScriptDaemon(info.cmd, hostName, info.role); - } - - @Override - public boolean isMultiUserSupported() throws IOException { - return true; - } - - class MultiUserScriptDaemon extends ScriptDaemon { - - private static final String MULTI_USER_BINARY_PATH_KEY = - "test.system.hdrc.multi-user.binary.path"; - private static final String MULTI_USER_MANAGING_USER = - "test.system.hdrc.multi-user.managinguser."; - private String binaryPath; - /** - * Manging user for a particular daemon is gotten by - * MULTI_USER_MANAGING_USER + daemonname - */ - private String mangingUser; - - public MultiUserScriptDaemon( - String daemonName, String hostName, Enum role) { - super(daemonName, hostName, role); - initialize(daemonName); - } - - private void initialize(String daemonName) { - binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY); - if (binaryPath == null || binaryPath.trim().isEmpty()) { - throw new IllegalArgumentException( - "Binary path for multi-user path is not present. Please set " - + MULTI_USER_BINARY_PATH_KEY + " correctly"); - } - File binaryFile = new File(binaryPath); - if (!binaryFile.exists() || !binaryFile.canExecute()) { - throw new IllegalArgumentException( - "Binary file path is not configured correctly. Please set " - + MULTI_USER_BINARY_PATH_KEY - + " to properly configured binary file."); - } - mangingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName); - if (mangingUser == null || mangingUser.trim().isEmpty()) { - throw new IllegalArgumentException( - "Manging user for daemon not present please set : " - + MULTI_USER_MANAGING_USER + daemonName + " to correct value."); - } - } - - @Override - protected String[] getCommand(String command,String confDir) { - ArrayList commandList = new ArrayList(); - commandList.add(binaryPath); - commandList.add(mangingUser); - commandList.add(hostName); - commandList.add("--config " - + confDir + " " + command + " " + daemonName); - return (String[]) commandList.toArray(new String[commandList.size()]); - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java deleted file mode 100644 index d0afe16b260..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.test.system.process; - -import java.io.IOException; - -/** - * Interface to manage the remote process. - */ -public interface RemoteProcess { - /** - * Get the host on which the daemon process is running/stopped.
- * - * @return hostname on which process is running/stopped. - */ - String getHostName(); - - /** - * Start a given daemon process.
- * - * @throws IOException if startup fails. - */ - void start() throws IOException; - /** - * Starts a daemon from a user-specified conf dir. - * @param newConfLocation the dir where the new conf resides. - * @throws IOException if starting the process from the new location fails. - */ - void start(String newConfLocation) throws IOException; - /** - * Stop a given daemon process.<br/>
- * - * @throws IOException if shutdown fails. - */ - void kill() throws IOException; - - /** - * Stops a given daemon running from a user-specified - * conf dir.<br/>
- * @param newConfLocation dir location where new conf resides. - * @throws IOException if kill fails from new conf location. - */ - void kill(String newConfLocation) throws IOException; - /** - * Get the role of the Daemon in the cluster. - * - * @return Enum - */ - Enum getRole(); - - /** - * Pushed the configuration to new configuration directory - * @param localDir The local directory which has config files that will be - * pushed to the remote location - * @throws IOException is thrown if the pushConfig results in a error. - */ - void pushConfig(String localDir) throws IOException; -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java deleted file mode 100644 index 6177c7909a8..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.util; - -public interface RemoteExecution { - public void executeCommand (String remoteHostName, String user, - String command) throws Exception; - public int getExitCode(); - public String getOutput(); - public String getCommandString(); -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java deleted file mode 100644 index 704c97d745b..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java +++ /dev/null @@ -1,203 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
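As a usage note for the RemoteProcess contract above, a test could bounce a single daemon through its process handle along these lines; the helper class and host name are hypothetical, and the manager is assumed to be an already-initialized ClusterProcessManager.

import java.io.IOException;
import org.apache.hadoop.test.system.process.ClusterProcessManager;
import org.apache.hadoop.test.system.process.RemoteProcess;

public class RestartDaemonSketch {
  static void restart(ClusterProcessManager mgr, String host) throws IOException {
    for (RemoteProcess p : mgr.getAllProcesses()) {
      if (host.equals(p.getHostName())) {
        p.kill();   // runs the stop command for that daemon on the remote host
        p.start();  // and starts it again with the default conf dir
      }
    }
  }
}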
- */ - -package org.apache.hadoop.util; - -import com.jcraft.jsch.*; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.Properties; - -/** - * Remote Execution of commands on a remote machine. - */ - -public class SSHRemoteExecution implements RemoteExecution { - - static final Log LOG = LogFactory.getLog(SSHRemoteExecution.class); - static final int SSH_PORT = 22; - static final String DEFAULT_IDENTITY="id_dsa"; - static final String DEFAULT_KNOWNHOSTS="known_hosts"; - static final String FS = System.getProperty("file.separator"); - static final String LS = System.getProperty("line.separator"); - private int exitCode; - private StringBuffer output; - private String commandString; - - final StringBuffer errorMessage = new StringBuffer(); - public SSHRemoteExecution() throws Exception { - } - - protected String getHomeDir() { - String currentUser=System.getProperty("user.name"); - String userHome=System.getProperty("user.home"); - - return userHome.substring(0, userHome.indexOf(currentUser)-1); - } - - /** - * Execute command at remote host under given user - * @param remoteHostName remote host name - * @param user is the name of the user to be login under; - * current user will be used if this is set to null - * @param command to be executed remotely - * @param identityFile is the name of alternative identity file; default - * is ~user/.ssh/id_dsa - * @param portNumber remote SSH daemon port number, default is 22 - * @throws Exception in case of errors - */ - public void executeCommand (String remoteHostName, String user, - String command, String identityFile, int portNumber) throws Exception { - commandString = command; - String sessionUser = System.getProperty("user.name"); - String userHome=System.getProperty("user.home"); - if (user != null) { - sessionUser = user; - userHome = getHomeDir() + FS + user; - } - String dotSSHDir = userHome + FS + ".ssh"; - String sessionIdentity = dotSSHDir + FS + DEFAULT_IDENTITY; - if (identityFile != null) { - sessionIdentity = identityFile; - } - - JSch jsch = new JSch(); - - Session session = jsch.getSession(sessionUser, remoteHostName, portNumber); - jsch.setKnownHosts(dotSSHDir + FS + DEFAULT_KNOWNHOSTS); - jsch.addIdentity(sessionIdentity); - - Properties config = new Properties(); - config.put("StrictHostKeyChecking", "no"); - session.setConfig(config); - - session.connect(30000); // making a connection with timeout. 
- - Channel channel=session.openChannel("exec"); - ((ChannelExec)channel).setCommand(command); - channel.setInputStream(null); - - final BufferedReader errReader = - new BufferedReader( - new InputStreamReader(((ChannelExec)channel).getErrStream())); - BufferedReader inReader = - new BufferedReader(new InputStreamReader(channel.getInputStream())); - - channel.connect(); - Thread errorThread = new Thread() { - @Override - public void run() { - try { - String line = errReader.readLine(); - while((line != null) && !isInterrupted()) { - errorMessage.append(line); - errorMessage.append(LS); - line = errReader.readLine(); - } - } catch(IOException ioe) { - LOG.warn("Error reading the error stream", ioe); - } - } - }; - - try { - errorThread.start(); - } catch (IllegalStateException e) { - LOG.debug(e); - } - try { - parseExecResult(inReader); - String line = inReader.readLine(); - while (line != null) { - line = inReader.readLine(); - } - - if(channel.isClosed()) { - exitCode = channel.getExitStatus(); - LOG.debug("exit-status: " + exitCode); - } - try { - // make sure that the error thread exits - errorThread.join(); - } catch (InterruptedException ie) { - LOG.warn("Interrupted while reading the error stream", ie); - } - } catch (Exception ie) { - throw new IOException(ie.toString()); - } - finally { - try { - inReader.close(); - } catch (IOException ioe) { - LOG.warn("Error while closing the input stream", ioe); - } - try { - errReader.close(); - } catch (IOException ioe) { - LOG.warn("Error while closing the error stream", ioe); - } - channel.disconnect(); - session.disconnect(); - } - } - - /** - * Execute command at remote host under given username - * Default identity is ~/.ssh/id_dsa key will be used - * Default known_hosts file is ~/.ssh/known_hosts will be used - * @param remoteHostName remote host name - * @param user is the name of the user to be login under; - * if equals to null then current user name will be used - * @param command to be executed remotely - */ - @Override - public void executeCommand (String remoteHostName, String user, - String command) throws Exception { - executeCommand(remoteHostName, user, command, null, SSH_PORT); - } - - @Override - public int getExitCode() { - return exitCode; - } - - protected void parseExecResult(BufferedReader lines) throws IOException { - output = new StringBuffer(); - char[] buf = new char[512]; - int nRead; - while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) { - output.append(buf, 0, nRead); - } - } - - /** Get the output of the ssh command.*/ - @Override - public String getOutput() { - return (output == null) ? "" : output.toString(); - } - - /** Get the String representation of ssh command */ - @Override - public String getCommandString() { - return commandString; - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh b/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh deleted file mode 100644 index 1230f0e0d35..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# local folder with new configuration file -LOCAL_DIR=$1 -# remote daemon host -HOST=$2 -#remote dir points to the location of new config files -REMOTE_DIR=$3 -# remote daemon HADOOP_CONF_DIR location -DAEMON_HADOOP_CONF_DIR=$4 - -if [ $# -ne 4 ]; then - echo "Wrong number of parameters" >&2 - exit 2 -fi - -ret_value=0 - -echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR} -echo and populates it with new configs prepared in $LOCAL_DIR - -ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR} -ret_value=$? - -# make sure files are writeble -ssh ${HOST} chmod u+w ${REMOTE_DIR}/* - -# copy new files over -scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR} - -err_code=`echo $? + $ret_value | bc` -echo Copying of files from local to remote returned ${err_code} - diff --git a/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java b/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java deleted file mode 100644 index 15eb00cfe33..00000000000 --- a/hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.util; - -import static org.junit.Assert.assertEquals; -import org.junit.Test; - -public class TestSSHRemoteExecution { - - @Test - /** - * Method: executeCommand(String remoteHostName, String user, String command) - */ - public void testExecuteCommandForRemoteHostNameUserCommand() throws Exception { - String command = "ls -l /bin"; - SSHRemoteExecution sshRE = new SSHRemoteExecution(); - sshRE.executeCommand("localhost", null, "ls -l /bin"); - System.out.println(sshRE.getOutput()); - assertEquals("Exit code should is expected to be 0", sshRE.getExitCode(), 0); - assertEquals("Mismatched command string", sshRE.getCommandString(), command); - } - - @Test - /** - * Method: getHomeDir() - */ - public void testGetHomeDir() throws Exception { - SSHRemoteExecution sshRE = new SSHRemoteExecution(); - String ret = sshRE.getHomeDir(); - assertEquals(System.getProperty("user.home"), - ret + System.getProperty("file.separator") + - System.getProperty("user.name")); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj deleted file mode 100644 index 4738c254b2b..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdfs; - -import java.util.ArrayList; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.hdfs.test.system.DNProtocol; -import org.apache.hadoop.hdfs.test.system.NNProtocol; -import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; - -/** - * This aspect adds two HDFS Herriot specific protocols tp the list of 'authorized' - * Herriot protocols. - * Protocol descriptors i.e. 
'security.nn.protocol.acl' have to be added to - * hadoop-policy.xml if present - */ -public privileged aspect HDFSPolicyProviderAspect { - private static final Log LOG = LogFactory - .getLog(HDFSPolicyProviderAspect.class); - - ArrayList herriotHDFSServices = null; - - pointcut updateHDFSServices() : - execution (public Service[] HDFSPolicyProvider.getServices()); - - Service[] around() : updateHDFSServices () { - herriotHDFSServices = new ArrayList(); - for (Service s : HDFSPolicyProvider.hdfsServices) { - LOG.debug("Copying configured protocol to " - + s.getProtocol().getCanonicalName()); - herriotHDFSServices.add(s); - } - herriotHDFSServices.add(new Service("security.daemon.protocol.acl", - DaemonProtocol.class)); - herriotHDFSServices.add(new Service("security.nn.protocol.acl", - NNProtocol.class)); - herriotHDFSServices.add(new Service("security.dn.protocol.acl", - DNProtocol.class)); - final Service[] retArray = herriotHDFSServices - .toArray(new Service[herriotHDFSServices.size()]); - LOG.debug("Number of configured protocols to return: " + retArray.length); - return retArray; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj deleted file mode 100644 index e2f3ec32710..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdfs.server.datanode; - -import java.io.File; -import java.io.IOException; -import java.util.AbstractList; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.test.system.DNProtocol; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources; - -public privileged aspect DataNodeAspect { - declare parents : DataNode implements DNProtocol; - - public Configuration DataNode.getDaemonConf() { - return super.getConf(); - } - - pointcut dnConstructorPointcut(Configuration conf, AbstractList dirs, - SecureResources resources) : - call(DataNode.new(Configuration, AbstractList, SecureResources)) - && args(conf, dirs, resources); - - after(Configuration conf, AbstractList dirs, SecureResources resources) - returning (DataNode datanode): - dnConstructorPointcut(conf, dirs, resources) { - try { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - datanode.setUser(ugi.getShortUserName()); - } catch (IOException e) { - datanode.LOG.warn("Unable to get the user information for the " + - "DataNode"); - } - datanode.setReady(true); - } - - pointcut getVersionAspect(String protocol, long clientVersion) : - execution(public long DataNode.getProtocolVersion(String , - long) throws IOException) && args(protocol, clientVersion); - - long around(String protocol, long clientVersion) : - getVersionAspect(protocol, clientVersion) { - if(protocol.equals(DaemonProtocol.class.getName())) { - return DaemonProtocol.versionID; - } else if(protocol.equals(DNProtocol.class.getName())) { - return DNProtocol.versionID; - } else { - return proceed(protocol, clientVersion); - } - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj deleted file mode 100644 index 068382d4351..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdfs.server.namenode; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.test.system.NNProtocol; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.system.DaemonProtocol; - -public privileged aspect NameNodeAspect { - declare parents : NameNode implements NNProtocol; - - // Namename doesn't store a copy of its configuration - // because it can be changed through the life cycle of the object - // So, the an exposed reference needs to be added and updated after - // new NameNode(Configuration conf) is complete - Configuration NameNode.configRef = null; - - // Method simply assign a reference to the NameNode configuration object - void NameNode.setRef (Configuration conf) { - if (configRef == null) - configRef = conf; - } - - public Configuration NameNode.getDaemonConf() { - return configRef; - } - - pointcut nnConstructorPointcut(Configuration conf) : - call(NameNode.new(Configuration)) && args(conf); - - after(Configuration conf) returning (NameNode namenode): - nnConstructorPointcut(conf) { - try { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - namenode.setUser(ugi.getShortUserName()); - } catch (IOException e) { - namenode.LOG.warn("Unable to get the user information for the " + - "Jobtracker"); - } - namenode.setRef(conf); - namenode.setReady(true); - } - - pointcut getVersionAspect(String protocol, long clientVersion) : - execution(public long NameNode.getProtocolVersion(String , - long) throws IOException) && args(protocol, clientVersion); - - long around(String protocol, long clientVersion) : - getVersionAspect(protocol, clientVersion) { - if(protocol.equals(DaemonProtocol.class.getName())) { - return DaemonProtocol.versionID; - } else if(protocol.equals(NNProtocol.class.getName())) { - return NNProtocol.versionID; - } else { - return proceed(protocol, clientVersion); - } - } -} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml deleted file mode 100644 index 4e540623641..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml +++ /dev/null @@ -1,147 +0,0 @@ - - - - - - - - - - - - test.system.hdrc.hadoophome - $(TO_DO_HADOOP_INSTALL)/share/hadoop-current - This is the path to the home directory of the hadoop deployment. - - - - test.system.hdrc.hadoopconfdir - $(TO_DO_HADOOP_INSTALL)/conf/hadoop - This is the path to the configuration directory of the hadoop - cluster that is deployed. - - - - - test.system.hdrc.dn.hostfile - slaves.localcopy.txt - File name containing the hostnames where the DataNodes are running. - - - - - test.system.hdfs.clusterprocess.impl.class - org.apache.hadoop.hdfs.test.system.HDFSCluster$HDFSProcessManager - - Cluster process manager for the Hdfs subsystem of the cluster. The value - org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager can - be used to enable multi user support. - - - - - test.system.hdrc.deployed.scripts.dir - ./src/test/system/scripts - - This directory hosts the scripts in the deployed location where - the system test client runs. - - - - - test.system.hdrc.hadoopnewconfdir - $(TO_DO_GLOBAL_TMP_DIR)/newconf - - The directory where the new config files will be copied to in all - the clusters is pointed out this directory. 
- - - - - test.system.hdrc.suspend.cmd - kill -SIGSTOP - - Command for suspending the given process. - - - - - test.system.hdrc.resume.cmd - kill -SIGCONT - - Command for resuming the given suspended process. - - - - test.system.hdrc.hadoop.local.confdir - $(TO_DO_GLOBAL_TMP_DIR)/localconf - - A local directory where a new config file is placed before - being pushed into new config location on the cluster. - - - - - - - test.system.hdfs.clusterprocess.impl.class - org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager - - Enabling multi user based cluster process manger. - - - - test.system.hdrc.multi-user.list.path - $(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers - - Multi user list for creating the proxy users. - - - - test.system.hdrc.multi-user.binary.path - $(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs - - Local file system path on gate way to cluster-controller binary including the binary name. - To build the binary the following commands need to be executed: - % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster) - % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path - Location of the cluster is important security precaution. - The binary should be owned by root and test user group permission should be set such a - way that it can be executed by binary. Example usage would be: - % sudo chown root binary - % sudo chmod 6511 binary - Change permission appropriately to make it more secure. - - - - test.system.hdrc.multi-user.managinguser.namenode - * - - User value for managing the particular daemon, please note that these user should be - present on gateways also, an example configuration for the above would be - key name = test.system.hdrc.multi-user.managinguser.namenode - key value = guest - Please note the daemon names are all lower case, corresponding to hadoop-daemon.sh command. - - - - test.system.hdrc.multi-user.managinguser.datanode - * - - - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java deleted file mode 100644 index 2376892c53b..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdfs.test.system; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.test.system.process.RemoteProcess; - -/** - * Datanode client for system tests. Assumption of the class is that the - * configuration key is set for the configuration key : {@code - * DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY} is set, only the port portion of - * the address is used. - */ -public class DNClient extends HDFSDaemonClient { - - DNProtocol proxy; - private static final String HADOOP_DATANODE_OPTS_ENV = "HADOOP_DATANODE_OPTS"; - - public DNClient(Configuration conf, RemoteProcess process) throws IOException { - super(conf, process); - } - - @Override - public void connect() throws IOException { - if (isConnected()) { - return; - } - String sockAddrStr = getConf().get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY); - if (sockAddrStr == null) { - throw new IllegalArgumentException("Datenode IPC address is not set." - + "Check if " + DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY - + " is configured."); - } - String[] splits = sockAddrStr.split(":"); - if (splits.length != 2) { - throw new IllegalArgumentException( - "Datanode IPC address is not correctly configured"); - } - String port = splits[1]; - String sockAddr = getHostName() + ":" + port; - InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr); - proxy = (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID, - bindAddr, getConf()); - setConnected(true); - } - - @Override - public void disconnect() throws IOException { - RPC.stopProxy(proxy); - setConnected(false); - } - - @Override - protected DNProtocol getProxy() { - return proxy; - } - - public Configuration getDatanodeConfig() throws IOException { - return getProxy().getDaemonConf(); - } - - @Override - public String getHadoopOptsEnvName() { - return HADOOP_DATANODE_OPTS_ENV; - } - - /** - * Concrete implementation of abstract super class method - * @param attributeName name of the attribute to be retrieved - * @return Object value of the given attribute - * @throws IOException is thrown in case of communication errors - */ - @Override - public Object getDaemonAttribute (String attributeName) throws IOException { - return getJmxAttribute("DataNode", "DataNodeInfo", attributeName); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java deleted file mode 100644 index 31bdd7f0aa9..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.test.system; - -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.test.system.DaemonProtocol; - -/** - * Client side API exposed from Datanode. - * Actual implementations are likely to be injected - * - * The protocol has to be annotated so KerberosInfo can be filled in during - * creation of a ipc.Client connection - */ -@KerberosInfo( - serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY) -public interface DNProtocol extends DaemonProtocol { - public static final long versionID = 1L; -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java deleted file mode 100644 index d9504f8faec..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdfs.test.system; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.apache.hadoop.test.system.AbstractDaemonCluster; -import org.apache.hadoop.test.system.process.ClusterProcessManager; -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster; -import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster; -import org.apache.hadoop.test.system.process.RemoteProcess; -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo; - -public class HDFSCluster extends AbstractDaemonCluster { - - static { - Configuration.addDefaultResource("hdfs-site.xml"); - } - - private static final Log LOG = LogFactory.getLog(HDFSCluster.class); - public static final String CLUSTER_PROCESS_MGR_IMPL = - "test.system.hdfs.clusterprocess.impl.class"; - - private HDFSCluster(Configuration conf, ClusterProcessManager rCluster) - throws IOException { - super(conf, rCluster); - } - - /** - * Key is used to to point to the file containing hostnames of tasktrackers - */ - public static final String CONF_HADOOP_DN_HOSTFILE_NAME = - "test.system.hdrc.dn.hostfile"; - - private static List hdfsDaemonInfos; - - private static String nnHostName; - private static String DN_hostFileName; - - protected enum Role {NN, DN} - - @Override - protected AbstractDaemonClient - createClient(RemoteProcess process) throws IOException { - Enum pRole = process.getRole(); - if (Role.NN.equals(pRole)) { - return createNNClient(process); - } else if (Role.DN.equals(pRole)) { - return createDNClient(process); - } else throw new IOException("Role " + pRole + - " is not supported by HDFSCluster"); - } - - protected DNClient createDNClient(RemoteProcess dnDaemon) throws IOException { - return new DNClient(getConf(), dnDaemon); - } - - protected NNClient createNNClient(RemoteProcess nnDaemon) throws IOException { - return new NNClient(getConf(), nnDaemon); - } - - public NNClient getNNClient () { - Iterator iter = getDaemons().get(Role.NN).iterator(); - return (NNClient) iter.next(); - } - - public List getDNClients () { - return (List) getDaemons().get(Role.DN); - } - - public DNClient getDNClient (String hostname) { - for (DNClient dnC : getDNClients()) { - if (dnC.getHostName().equals(hostname)) - return dnC; - } - return null; - } - - public static class HDFSProcessManager extends HadoopDaemonRemoteCluster { - public HDFSProcessManager() { - super(hdfsDaemonInfos); - } - } - - public static class MultiUserHDFSProcessManager - extends MultiUserHadoopDaemonRemoteCluster { - public MultiUserHDFSProcessManager() { - super(hdfsDaemonInfos); - } - } - - - public static HDFSCluster createCluster(Configuration conf) throws Exception { - conf.addResource("system-test.xml"); - String sockAddrStr = FileSystem.getDefaultUri(conf).getAuthority(); - if (sockAddrStr == null) { - throw new IllegalArgumentException("Namenode IPC address is not set"); - } - String[] splits = sockAddrStr.split(":"); - if (splits.length != 2) { - throw new IllegalArgumentException( - "Namenode report IPC is not correctly configured"); - } - nnHostName = splits[0]; - DN_hostFileName = conf.get(CONF_HADOOP_DN_HOSTFILE_NAME, "slaves"); - - hdfsDaemonInfos = 
new ArrayList(); - hdfsDaemonInfos.add(new HadoopDaemonInfo("namenode", - Role.NN, Arrays.asList(new String[]{nnHostName}))); - hdfsDaemonInfos.add(new HadoopDaemonInfo("datanode", - Role.DN, DN_hostFileName)); - - String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL); - if (implKlass == null || implKlass.isEmpty()) { - implKlass = HDFSCluster.HDFSProcessManager.class.getName(); - } - Class klass = - (Class) Class.forName(implKlass); - ClusterProcessManager clusterProcessMgr = klass.newInstance(); - LOG.info("Created ClusterProcessManager as " + implKlass); - clusterProcessMgr.init(conf); - return new HDFSCluster(conf, clusterProcessMgr); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java deleted file mode 100644 index 4316b36ac3b..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.test.system; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.test.system.process.RemoteProcess; - -public abstract class HDFSDaemonClient - extends AbstractDaemonClient { - - public HDFSDaemonClient(Configuration conf, RemoteProcess process) - throws IOException { - super(conf, process); - } - - public String[] getHDFSDataDirs() throws IOException { - return getProxy().getDaemonConf().getStrings( - DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY); - } - - public String getHDFSNameDirs() throws IOException { - return getProxy().getDaemonConf().getStrings( - DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0]; - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java deleted file mode 100644 index 79be0e18f8e..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
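For reference, a hypothetical snippet that strings together the HDFSCluster factory and the daemon clients shown above might read as follows; it adds nothing beyond the calls already present in these files, and the printed output is only illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.test.system.DNClient;
import org.apache.hadoop.hdfs.test.system.HDFSCluster;
import org.apache.hadoop.hdfs.test.system.NNClient;

public class HDFSClusterUsageSketch {
  public static void main(String[] args) throws Exception {
    HDFSCluster cluster = HDFSCluster.createCluster(new Configuration());
    NNClient nn = cluster.getNNClient();
    nn.connect();                       // RPC proxy to the NameNode's Herriot interface
    for (DNClient dn : cluster.getDNClients()) {
      dn.connect();                     // RPC proxy to each DataNode's Herriot interface
      System.out.println(dn.getHostName() + " data dirs: "
          + java.util.Arrays.toString(dn.getHDFSDataDirs()));
    }
  }
}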
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.test.system; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.test.system.process.RemoteProcess; - -public class NNClient extends HDFSDaemonClient { - - NNProtocol proxy; - private static final String HADOOP_NAMENODE_OPTS_ENV = "HADOOP_NAMENODE_OPTS"; - - public NNClient(Configuration conf, RemoteProcess process) throws IOException { - super(conf, process); - } - - @Override - public void connect() throws IOException { - if (isConnected()) - return; - String sockAddrStr = FileSystem.getDefaultUri(getConf()).getAuthority(); - if (sockAddrStr == null) { - throw new IllegalArgumentException("Namenode IPC address is not set"); - } - String[] splits = sockAddrStr.split(":"); - if (splits.length != 2) { - throw new IllegalArgumentException( - "Namenode report IPC is not correctly configured"); - } - String port = splits[1]; - String sockAddr = getHostName() + ":" + port; - - InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr); - proxy = (NNProtocol) RPC.getProxy(NNProtocol.class, NNProtocol.versionID, - bindAddr, getConf()); - setConnected(true); - } - - @Override - public void disconnect() throws IOException { - RPC.stopProxy(proxy); - setConnected(false); - } - - @Override - protected NNProtocol getProxy() { - return proxy; - } - - @Override - public String getHadoopOptsEnvName() { - return HADOOP_NAMENODE_OPTS_ENV; - } - - /** - * Concrete implementation of abstract super class method - * @param attributeName name of the attribute to be retrieved - * @return Object value of the given attribute - * @throws IOException is thrown in case of communication errors - */ - @Override - public Object getDaemonAttribute (String attributeName) throws IOException { - return getJmxAttribute("NameNode", "NameNodeInfo", attributeName); - } -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java deleted file mode 100644 index 2665d23d4ba..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdfs.test.system; - -import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.test.system.DaemonProtocol; - -/** - * Client side API exposed from Namenode. - * Actual implementations are likely to be injected - * - * The protocol has to be annotated so KerberosInfo can be filled in during - * creation of a ipc.Client connection - */ -@KerberosInfo( - serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY) -public interface NNProtocol extends DaemonProtocol { - public static final long versionID = 1L; -} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java deleted file mode 100644 index 43dcae541d2..00000000000 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hdfs; - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.test.system.DNClient; -import org.apache.hadoop.hdfs.test.system.HDFSCluster; -import org.apache.hadoop.hdfs.test.system.NNClient; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mortbay.util.ajax.JSON; - -public class TestHL040 { - private HDFSCluster cluster = null; - private static final Log LOG = LogFactory.getLog(TestHL040.class); - - public TestHL040() throws Exception { - } - - @Before - public void setupUp() throws Exception { - cluster = HDFSCluster.createCluster(new Configuration()); - cluster.setUp(); - } - - @After - public void tearDown() throws Exception { - cluster.tearDown(); - } - - @Test - public void testConnect() throws IOException { - LOG.info("Starting TestHL040: connecting to the HDFSCluster "); - LOG.info("================ Getting namenode info ================"); - NNClient dfsMaster = cluster.getNNClient(); - LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " + - dfsMaster.getProcessInfo()); - LOG.info("================ Getting datanode info ================"); - Collection<DNClient> clients = cluster.getDNClients(); - for (DNClient dnC : clients) { - LOG.info("Process info of datanode " + dnC.getHostName() + " is: " + - dnC.getProcessInfo()); - Assert.assertNotNull("Datanode process info isn't supposed to be null", - dnC.getProcessInfo()); - LOG.info("Free space " + getFreeSpace(dnC)); - } - } - - private long getFreeSpace(DNClient dnC) throws IOException { - Object volObj = dnC.getDaemonAttribute("VolumeInfo"); - Assert.assertNotNull("Attribute value is expected to be not null", volObj); - LOG.debug("Got object: " + volObj); - Map volInfoMap = (Map) JSON.parse(volObj.toString()); - long totalFreeSpace = 0L; - for (Object key : volInfoMap.keySet()) { - Map attrMap = (Map) volInfoMap.get(key); - long freeSpace = (Long) attrMap.get("freeSpace"); - totalFreeSpace += freeSpace; - } - return totalFreeSpace; - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java deleted file mode 100644 index b70fb9aeffb..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission; -import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobVerification; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory; -import org.apache.hadoop.tools.rumen.ZombieJob; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.mapreduce.JobID; -import org.junit.AfterClass; -import org.junit.BeforeClass; - -import java.util.Iterator; -import java.util.Map; -import java.util.List; -import java.util.Set; -import java.io.IOException; -import org.junit.Assert; - -/** - * Run and verify the Gridmix jobs for a given trace. - */ -public class GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(GridmixSystemTestCase.class); - protected static Configuration conf = new Configuration(); - protected static MRCluster cluster; - protected static int cSize; - protected static JTClient jtClient; - protected static JTProtocol rtClient; - protected static Path gridmixDir; - protected static Map map; - protected static GridmixJobSubmission gridmixJS; - protected static GridmixJobVerification gridmixJV; - protected static List jobids; - - @BeforeClass - public static void before() throws Exception { - String [] excludeExpList = {"java.net.ConnectException", - "java.io.IOException"}; - cluster = MRCluster.createCluster(conf); - cluster.setExcludeExpList(excludeExpList); - cluster.setUp(); - cSize = cluster.getTTClients().size(); - jtClient = cluster.getJTClient(); - rtClient = jtClient.getProxy(); - gridmixDir = new Path("herriot-gridmix"); - UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf()); - map = UtilsForGridmix.getMRTraces(rtClient.getDaemonConf()); - } - - @AfterClass - public static void after() throws Exception { - UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf()); - org.apache.hadoop.fs.FileUtil.fullyDelete(new java.io.File(System. - getProperty("java.io.tmpdir") + "/gridmix-st/")); - cluster.tearDown(); - - /* Clean up the proxy user directories if gridmix runs with - RoundRobinUserResolver mode.*/ - if (gridmixJV != null - && gridmixJV.getJobUserResolver().contains("RoundRobin")) { - List proxyUsers = - UtilsForGridmix.listProxyUsers(gridmixJS.getJobConf(), - UserGroupInformation.getLoginUser().getShortUserName()); - for(int index = 0; index < proxyUsers.size(); index++){ - UtilsForGridmix.cleanup(new Path("hdfs:///user/" + - proxyUsers.get(index)), - rtClient.getDaemonConf()); - } - } - } - - /** - * Run the gridmix with specified runtime parameters and - * verify the jobs after completion of execution. - * @param runtimeValues - common runtime arguments for gridmix. - * @param otherValues - test specific runtime arguments for gridmix. - * @param tracePath - path of a trace file. - * @throws Exception - if an exception occurs. 
- */ - public static void runGridmixAndVerify(String[] runtimeValues, - String [] otherValues, String tracePath) throws Exception { - runGridmixAndVerify(runtimeValues, otherValues, tracePath , - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Run the gridmix with specified runtime parameters and - * verify the jobs after completion of execution. - * @param runtimeValues - common runtime arguments for gridmix. - * @param otherValues - test specific runtime arguments for gridmix. - * @param tracePath - path of a trace file. - * @param mode - 1 for data generation, 2 for running the gridmix and 3 for - * data generation and running the gridmix. - * @throws Exception - if an exception occurs. - */ - public static void runGridmixAndVerify(String [] runtimeValues, - String [] otherValues, String tracePath, int mode) throws Exception { - List<JobID> jobids = runGridmix(runtimeValues, otherValues, mode); - gridmixJV = new GridmixJobVerification(new Path(tracePath), - gridmixJS.getJobConf(), jtClient); - gridmixJV.verifyGridmixJobsWithJobStories(jobids); - } - - /** - * Run the gridmix with user specified mode. - * @param runtimeValues - common runtime parameters for gridmix. - * @param otherValues - test specific runtime parameters for gridmix. - * @param mode - 1 for data generation, 2 for running the gridmix and 3 for - * data generation and running the gridmix. - * @return - list of gridmix job ids. - * @throws Exception - if an exception occurs. - */ - public static List<JobID> runGridmix(String[] runtimeValues, - String[] otherValues, int mode) throws Exception { - gridmixJS = new GridmixJobSubmission(rtClient.getDaemonConf(), - jtClient, gridmixDir); - gridmixJS.submitJobs(runtimeValues, otherValues, mode); - List<JobID> jobids = - UtilsForGridmix.listGridmixJobIDs(jtClient.getClient(), - gridmixJS.getGridmixJobCount()); - return jobids; - } - - /** - * Get the trace file based on a given regular expression. - * @param regExp - trace file pattern. - * @return - trace file as string. - * @throws IOException - if an I/O error occurs. - */ - public static String getTraceFile(String regExp) throws IOException { - List<String> listTraces = UtilsForGridmix.listMRTraces( - rtClient.getDaemonConf()); - Iterator<String> ite = listTraces.iterator(); - while(ite.hasNext()) { - String traceFile = ite.next(); - if (traceFile.indexOf(regExp)>=0) { - return traceFile; - } - } - return null; - } - - /** - * Validate the task memory parameters. - * @param tracePath - trace file. 
- * @param isTraceHasHighRamJobs - true if trace has high ram job(s) - * otherwise its false - */ - @SuppressWarnings("deprecation") - public static void validateTaskMemoryParamters(String tracePath, - boolean isTraceHasHighRamJobs) throws IOException { - if (isTraceHasHighRamJobs) { - GridmixJobStory gjs = new GridmixJobStory(new Path(tracePath), - rtClient.getDaemonConf()); - Set jobids = gjs.getZombieJobs().keySet(); - boolean isHighRamFlag = false; - for (JobID jobid :jobids) { - ZombieJob zombieJob = gjs.getZombieJobs().get(jobid); - JobConf origJobConf = zombieJob.getJobConf(); - int origMapFactor = - GridmixJobVerification.getMapFactor(origJobConf); - int origReduceFactor = - GridmixJobVerification.getReduceFactor(origJobConf); - if (origMapFactor >= 2 || origReduceFactor >= 2) { - isHighRamFlag = true; - long TaskMapMemInMB = - GridmixJobVerification.getScaledTaskMemInMB( - GridMixConfig.JOB_MAP_MEMORY_MB, - GridMixConfig.CLUSTER_MAP_MEMORY, - origJobConf, rtClient.getDaemonConf()); - - long TaskReduceMemInMB = - GridmixJobVerification.getScaledTaskMemInMB( - GridMixConfig.JOB_REDUCE_MEMORY_MB, - GridMixConfig.CLUSTER_REDUCE_MEMORY, - origJobConf, rtClient.getDaemonConf()); - long taskMapLimitInMB = - conf.getLong(GridMixConfig.CLUSTER_MAX_MAP_MEMORY, - JobConf.DISABLED_MEMORY_LIMIT); - - long taskReduceLimitInMB = - conf.getLong(GridMixConfig.CLUSTER_MAX_REDUCE_MEMORY, - JobConf.DISABLED_MEMORY_LIMIT); - - GridmixJobVerification.verifyMemoryLimits(TaskMapMemInMB, - taskMapLimitInMB); - GridmixJobVerification.verifyMemoryLimits(TaskReduceMemInMB, - taskReduceLimitInMB); - } - } - Assert.assertTrue("Trace doesn't have atleast one high ram job.", - isHighRamFlag); - } - } - - public static boolean isLocalDistCache(String fileName, String userName, - boolean visibility) { - return DistributedCacheEmulator.isLocalDistCacheFile(fileName, - userName, visibility); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java deleted file mode 100644 index dd8e51edfd7..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test cpu emulation with default interval for gridmix jobs - * against different input data, submission policies and user resolvers. - * Verify the cpu resource metrics of both maps and reduces phase of - * Gridmix jobs with their corresponding original job in the input trace. - */ -public class TestCPUEmulationForMapsAndReducesWithCustomInterval - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestCPUEmulationWithUncompressedInput.class"); - int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue(); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * cpu emulation feature with default setting. The {@link Gridmix} - * should use the following runtime parameters. - * Submission Policy : STRESS, UserResovler: RoundRobinUserResolver. - * Once the {@link Gridmix} run is complete, verify cpu resource metrics of - * {@link Gridmix} jobs with their corresponding original job in a trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulationForMapsAndReducesWithCompressedInputCase7() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("cpu_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * cpu emulation feature with default setting. The {@link Gridmix} - * should use the following runtime parameters. - * Submission Policy : SERIAL, UserResovler: SubmitterUserResolver - * Once the {@link Gridmix} run is complete, verify cpu resource metrics of - * {@link Gridmix} jobs with their corresponding original job in a trace. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase8() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("cpu_emul_case2"); - Assert.assertNotNull("Trace file not found.", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.4F", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } -} - - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java deleted file mode 100644 index edd14a6bcbf..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.JobContext; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test cpu emulation with default interval for gridmix jobs - * against different input data, submission policies and user resolvers. - * Verify the cpu resource metrics for both maps and reduces of - * Gridmix jobs with their corresponding original job in the input trace. - */ -public class TestCPUEmulationForMapsAndReducesWithDefaultInterval - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestCPUEmulationForMapsAndReducesWithDefaultInterval.class"); - int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue(); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * cpu emulation feature with default setting. The {@link Gridmix} - * should use the following runtime parameters. - * Submission Policy : REPLAY, UserResovler: RoundRobinUserResolver. 
- * Once the {@link Gridmix} run is complete, verify cpu resource metrics of - * {@link Gridmix} jobs with their corresponding original jobs in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulationForMapsAndReducesWithCompressedInputCase5() - throws Exception { - final long inputSizeInMB = 7168; - String tracePath = getTraceFile("cpu_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * cpu emulation feature with default settings. The {@link Gridmix} - * should use the following runtime parameters. - * Submission Policy : STRESS, UserResovler: SubmitterUserResolver - * Once the Gridmix run is complete, verify cpu resource metrics of - * {@link Gridmix} jobs with their corresponding original jobs in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase6() - throws Exception { - final long inputSizeInMB = cSize * 400; - String tracePath = getTraceFile("cpu_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java deleted file mode 100644 index ac5205818cc..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} cpu emulation with custom interval for - * gridmix jobs against different input data, submission policies and - * user resolvers. Verify the map phase cpu metrics of gridmix jobs - * against their original job in the trace. - */ -public class TestCPUEmulationForMapsWithCustomInterval - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestCPUEmulationForMapsWithCustomInterval.class"); - int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue(); - - /** - * Generate compressed input and run {@link Gridmix} by turning on - * cpu emulation feature with custom setting. The {@link Gridmix} should - * use the following runtime parameters while running gridmix jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Once {@link Gridmix} run is complete, verify maps phase cpu resource - * metrics of {@link Gridmix} jobs with their corresponding original - * in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulatonForMapsWithCompressedInputCase3() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("cpu_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.25F"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on - * cpu emulation feature with custom settings. The {@link Gridmix} - * should use the following runtime paramters while running gridmix jobs. - * Submission Policy: REPLAY User Resolver Mode: RoundRobinUserResolver - * Once {@link Gridmix} run is complete, verify the map phase cpu resource - * metrics of {@link Gridmix} jobs with their corresponding jobs - * in the original trace. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testCPUEmulatonForMapsUnCompressedInputCase4() - throws Exception { - final long inputSizeInMB = cSize * 200; - String tracePath = getTraceFile("cpu_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN, - "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java deleted file mode 100644 index 6eabc53838c..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} cpu emulation with default settings for - * gridmix jobs against different input data, submission policies and - * user resolvers. Verify the map phase cpu metrics of gridmix jobs - * against their original jobs in the trace. - */ -public class TestCPUEmulationForMapsWithDefaultInterval - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestCPUEmulationForMapsWithDefaultInterval.class"); - int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue(); - - /** - * Generate compressed input and run {@link Gridmix} by turning on cpu - * emulation feature with default settings. The {@link Gridmix} should - * use the following runtime parameters while running the gridmix jobs. - * Submission Policy: STRESS, UserResolver: SubmitterUserResolver. - * Once the {@link Gridmix} run is complete, verify map phase cpu metrics of - * {@link Gridmix} jobs with their corresponding original job in a trace. 
- * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulatonForMapsWithCompressedInputCase1() - throws Exception { - final long inputSizeInMB = 1024 * 6; - String tracePath = getTraceFile("cpu_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on - * cpu emulation feature with default settings. The {@link Gridmix} - * should use the following runtime parameters while running Gridmix jobs. - * Submission Policy: REPLAY, UserResolver: RoundRobinUserResolver - * Once the Gridmix run is complete, verify cpu resource metrics of - * {@link Gridmix} jobs with their corresponding original job in a trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testCPUEmulatonForMapsWithUnCompressedInputCase2() - throws Exception { - final long inputSizeInMB = cSize * 200; - String tracePath = getTraceFile("cpu_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + - GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java deleted file mode 100644 index 3ade9e34e68..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the compression emulation for all the jobs in the trace - * irrespective of compressed inputs. - */ -public class TestCompressionEmulationEnableForAllTypesOfJobs - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestCompressionEmulationEnableForAllTypesOfJobs.class"); - - /** - * Generate compressed input data and verify the compression emulation - * for all the jobs in the trace irrespective of whether the original - * job uses the compressed input or not.Also use the custom compression - * ratios for map input, map output and reduce output. - * @throws Exception - if an error occurs. - */ - @Test - public void testInputCompressionEmualtionEnableForAllJobsWithDefaultRatios() - throws Exception { - final long inputSizeInMB = 1024 * 6; - final String tracePath = getTraceFile("compression_case4_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true", - "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.46", - "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35", - "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.36" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Use existing compressed input data and turn off the compression - * emulation. Verify the compression emulation whether it uses - * by the jobs or not. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testInputCompressionEmulationEnableForAllJobsWithCustomRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case4_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java deleted file mode 100644 index 4b7fc3a15aa..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Assert; -import org.junit.Test; -/** - * Verify the gridmix jobs compression ratio's of input, - * intermediate input and with default/custom ratios.Also verify - * the compressed output file format is enabled or not. - * - */ -public class TestCompressionEmulationForCompressInAndUncompressOut - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestCompressionEmulationForCompressInAndUncompressOut.class"); - final long inputSizeInMB = 1024 * 6; - - /** - * Generate a compressed input data and verify the compression ratios - * of map input and map output against default compression ratios - * and also verify the whether the compressed output file output format - * is enabled or not. - * @throws Exception -if an error occurs. 
- */ - @Test - public void testCompressionEmulationOfCompressedInputWithDefaultRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case2_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Use existing compressed input data and verify the compression ratios - * of input and intermediate input against custom compression ratios - * and also verify the compressed output file output format is enabled or not. - * @throws Exception -if an error occurs. - */ - @Test - public void testCompressionEmulationOfCompressedInputWithCustomRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case2_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf()); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true", - "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.58", - "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.42" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java deleted file mode 100644 index 383fc83de4b..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Assert; -import org.junit.Test; -/** - * Verify the gridmix jobs compression ratio's of reduce output and - * with default and custom ratios. - */ -public class TestCompressionEmulationForUncompressInAndCompressOut - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestCompressionEmulationForUncompressInAndCompressOut.class"); - final long inputSizeInMB = 1024 * 6; - - /** - * Generate a uncompressed input data and verify the compression ratios - * of reduce output against default output compression ratio. - * @throws Exception -if an error occurs. - */ - @Test - public void testCompressionEmulationOfCompressedOuputWithDefaultRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case3_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Use existing uncompressed input data and verify the compression ratio - * of reduce output against custom output compression ratio and also verify - * the compression output file output format. - * @throws Exception -if an error occurs. - */ - @Test - public void testCompressionEmulationOfCompressedOutputWithCustomRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case3_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf()); - final String [] runtimeValues = { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath }; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.38" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java deleted file mode 100644 index bb77016e120..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.GridmixJob; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the {@link Gridmix} with a high ram jobs trace by disabling the - * emulation of high ram and verify each {@link Gridmix} job - * whether it honors the high ram or not. In disable mode it should - * should not honor the high ram and run it as a normal job. - */ -public class TestDisableGridmixEmulationOfHighRam - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestDisableGridmixEmulationOfHighRam.class"); - - /** - * Generate input data and run {@link Gridmix} with a high ram jobs trace - * as a load job and STRESS submission policy in a SubmitterUserResolver - * mode. Verify each {@link Gridmix} job whether it honors the - * high ram or not after completion of execution. In disable mode the - * jobs should not honor the high ram. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfHighRamForReducersOfMRJobs() - throws Exception { - final long inputSizeInMB = cSize * 250; - String tracePath = getTraceFile("highram_mr_jobs_case3"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java deleted file mode 100644 index a1ae1e9dfaf..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the emulation of HDFS and Local FS distributed cache files against - * the given input trace file. - */ -public class TestEmulationOfHDFSAndLocalFSDCFiles extends - GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class"); - - /** - * Generate the input data and distributed cache files for HDFS and - * local FS. Verify the gridmix emulation of HDFS and Local FS - * distributed cache files in RoundRobinUserResolver mode with STRESS - * submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateDataEmulateHDFSAndLocalFSDCFiles() - throws Exception { - final long inputSizeInMB = 1024 * 6; - final String tracePath = getTraceFile("distcache_case8_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Use existing input and distributed cache files for HDFS and - * local FS. Verify the gridmix emulation of HDFS and Local FS - * distributed cache files in SubmitterUserResolver mode with REPLAY - * submission policy. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testEmulationOfHDFSAndLocalFSDCFiles() - throws Exception { - final String tracePath = getTraceFile("distcache_case8_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues ={"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java deleted file mode 100644 index 7f8938f88a7..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix emulation of HDFS distributed cache file which uses - * different jobs that are submitted with different users. - */ -public class TestEmulationOfHDFSDCFileUsesMultipleJobs extends - GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestEmulationOfHDFSDCFileUsesMultipleJobs.class"); - - /** - * Generate the input data and HDFS distributed cache file based - * on given input trace. Verify the Gridmix emulation of HDFS - * distributed cache file in RoundRobinResolver mode with - * STRESS submission policy. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGenerateAndEmulationOfHDFSDCFile() - throws Exception { - final long inputSizeInMB = 1024 * 6; - final String tracePath = getTraceFile("distcache_case9_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Verify the Gridmix emulation of HDFS distributed cache - * file in SubmitterUserResolver mode with STRESS submission policy - * by using the existing input data and HDFS distributed cache file. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixEmulationOfHDFSPublicDCFile() - throws Exception { - final String tracePath = getTraceFile("distcache_case9_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - tracePath}; - - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java deleted file mode 100644 index 453e5b99081..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix emulation of HDFS distributed cache files of - * different visibilities. - */ - -public class TestEmulationOfHDFSDCFilesWithDifferentVisibilities - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestEmulationOfHDFSDCFilesWithDifferentVisibilities.class"); - - /** - * Generate input data and HDFS distributed cache files of different - * visibilities based on given input trace. Verify the Gridmix emulation - * of HDFS distributed cache files of different visibilities in - * RoundRobinUserResolver mode with SERIAL submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateAndEmulateOfHDFSDCFilesWithDiffVisibilities() - throws Exception { - final long INPUT_SIZE = 1024 * 9; - final String tracePath = getTraceFile("distcache_case5_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - INPUT_SIZE+"m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Disable the distributed cache emulation and verify the Gridmix jobs - * whether it emulates or not. - * @throws Exception - */ - @Test - public void testHDFSDCFilesWithoutEnableDCEmulation() - throws Exception { - final String tracePath = getTraceFile("distcache_case6_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues ={ "LOADJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - tracePath}; - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java deleted file mode 100644 index cb3a35f4270..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the {@link Gridmix} with combination of high ram and normal jobs of - * trace and verify whether high ram jobs{@link Gridmix} are honoring or not. - * Normal MR jobs should not honors the high ram emulation. - */ -public class TestEmulationOfHighRamAndNormalMRJobs - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestEmulationOfHighRamAndNormalMRJobs.class"); - - /** - * Generate input data and run the combination normal and high ram - * {@link Gridmix} jobs as load job and STRESS submission policy - * in a SubmitterUserResolver mode. Verify whether each {@link Gridmix} - * job honors the high ram or not after completion of execution. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfHighRamForReducersOfMRJobs() - throws Exception { - final long inputSizeInMB = cSize * 250; - String tracePath = getTraceFile("highram_mr_jobs_case4"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeArgs = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - tracePath}; - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"}; - - validateTaskMemoryParamters(tracePath, true); - runGridmixAndVerify(runtimeArgs, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java deleted file mode 100644 index eff47f2d641..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the emulation of local FS distributed cache files. - * - */ -public class TestEmulationOfLocalFSDCFiles extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class"); - - /** - * Generate the input data and distributer cache files.Verify the - * gridmix emulation of local file system distributed cache files - * in RoundRobinUserResolver mode with REPLAY submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateInputAndEmulateLocalFSDCFile() - throws Exception { - final long inputSizeInMB = 1024 * 6; - final String tracePath = getTraceFile("distcache_case7_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Use existing input and local distributed cache files and verify - * the gridmix emulation of local file system distributed cache - * files in SubmitterUserResolver mode with STRESS - * Submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfLocalFSDCFile() - throws Exception { - final String tracePath = getTraceFile("distcache_case7_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java deleted file mode 100644 index ef273b5fd25..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java +++ /dev/null @@ -1,229 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapred.gridmix.RoundRobinUserResolver; -import org.apache.hadoop.mapred.gridmix.EchoUserResolver; -import org.apache.hadoop.mapred.gridmix.SubmitterUserResolver; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.ContentSummary; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import org.junit.Assert; -import java.io.IOException; - -/** - * Verify the Gridmix data generation with various submission policies and - * user resolver modes. - */ -public class TestGridMixDataGeneration { - private static final Log LOG = - LogFactory.getLog(TestGridMixDataGeneration.class); - private static Configuration conf = new Configuration(); - private static MRCluster cluster; - private static JTClient jtClient; - private static JTProtocol rtClient; - private static Path gridmixDir; - private static int cSize; - - @BeforeClass - public static void before() throws Exception { - String [] excludeExpList = {"java.net.ConnectException", - "java.io.IOException"}; - cluster = MRCluster.createCluster(conf); - cluster.setExcludeExpList(excludeExpList); - cluster.setUp(); - cSize = cluster.getTTClients().size(); - jtClient = cluster.getJTClient(); - rtClient = jtClient.getProxy(); - gridmixDir = new Path("herriot-gridmix"); - UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf()); - } - - @AfterClass - public static void after() throws Exception { - UtilsForGridmix.cleanup(gridmixDir,conf); - cluster.tearDown(); - } - - /** - * Generate the data in a STRESS submission policy with SubmitterUserResolver - * mode and verify whether the generated data matches with given - * input size or not. 
- * @throws IOException - */ - @Test - public void testGenerateDataWithSTRESSSubmission() throws Exception { - conf = rtClient.getDaemonConf(); - final long inputSizeInMB = cSize * 128; - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file:///dev/null"}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - int exitCode = - UtilsForGridmix.runGridmixJob(gridmixDir, conf, - GridMixRunMode.DATA_GENERATION.getValue(), - runtimeValues, otherArgs); - Assert.assertEquals("Data generation has failed.", 0 , exitCode); - checkGeneratedDataAndJobStatus(inputSizeInMB); - } - - /** - * Generate the data in a REPLAY submission policy with RoundRobinUserResolver - * mode and verify whether the generated data matches with the given - * input size or not. - * @throws Exception - */ - @Test - public void testGenerateDataWithREPLAYSubmission() throws Exception { - conf = rtClient.getDaemonConf(); - final long inputSizeInMB = cSize * 300; - String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB +"m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - "file:///dev/null"}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - int exitCode = - UtilsForGridmix.runGridmixJob(gridmixDir, conf, - GridMixRunMode.DATA_GENERATION.getValue(), - runtimeValues, otherArgs); - Assert.assertEquals("Data generation has failed.", 0 , exitCode); - checkGeneratedDataAndJobStatus(inputSizeInMB); - } - - /** - * Generate the data in a SERIAL submission policy with EchoUserResolver - * mode and also set the no.of bytes per file in the data.Verify whether each - * file size matches with given per file size or not and also - * verify the overall size of generated data. 
- * @throws Exception - */ - @Test - public void testGenerateDataWithSERIALSubmission() throws Exception { - conf = rtClient.getDaemonConf(); - long perNodeSizeInMB = 500; // 500 mb per node data - final long inputSizeInMB = cSize * perNodeSizeInMB; - String [] runtimeValues ={"LOADJOB", - EchoUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - "file:///dev/null"}; - long bytesPerFile = 200 * 1024 * 1024; // 200 mb per file of data - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile, - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - int exitCode = - UtilsForGridmix.runGridmixJob(gridmixDir, conf, - GridMixRunMode.DATA_GENERATION.getValue(), - runtimeValues, otherArgs); - Assert.assertEquals("Data generation has failed.", 0 , exitCode); - LOG.info("Verify the eache file size in a generate data."); - verifyEachNodeSize(new Path(gridmixDir, "input"), perNodeSizeInMB); - verifyNumOfFilesGeneratedInEachNode(new Path(gridmixDir, "input"), - perNodeSizeInMB, bytesPerFile); - checkGeneratedDataAndJobStatus(inputSizeInMB); - } - - private void checkGeneratedDataAndJobStatus(long inputSize) - throws IOException { - LOG.info("Verify the generated data size."); - long dataSizeInMB = getDataSizeInMB(new Path(gridmixDir,"input")); - Assert.assertTrue("Generate data has not matched with given size", - dataSizeInMB + 0.1 > inputSize || dataSizeInMB - 0.1 < inputSize); - - JobClient jobClient = jtClient.getClient(); - int len = jobClient.getAllJobs().length; - LOG.info("Verify the job status after completion of job."); - Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, - jobClient.getAllJobs()[len-1].getRunState()); - } - - private void verifyEachNodeSize(Path inputDir, long dataSizePerNode) - throws IOException { - FileSystem fs = inputDir.getFileSystem(conf); - FileStatus [] fstatus = fs.listStatus(inputDir); - for (FileStatus fstat : fstatus) { - if ( fstat.isDirectory()) { - long fileSize = getDataSizeInMB(fstat.getPath()); - Assert.assertTrue("The Size has not matched with given " - + "per node file size(" + dataSizePerNode +"MB)", - fileSize + 0.1 > dataSizePerNode - || fileSize - 0.1 < dataSizePerNode); - } - } - } - - private void verifyNumOfFilesGeneratedInEachNode(Path inputDir, - long nodeSize, long fileSize) throws IOException { - long fileCount = nodeSize/fileSize; - long expFileCount = Math.round(fileCount); - expFileCount = expFileCount + ((nodeSize%fileSize != 0)? 
1:0); - FileSystem fs = inputDir.getFileSystem(conf); - FileStatus [] fstatus = fs.listStatus(inputDir); - for (FileStatus fstat : fstatus) { - if ( fstat.isDirectory()) { - FileSystem nodeFs = fstat.getPath().getFileSystem(conf); - long actFileCount = nodeFs.getContentSummary( - fstat.getPath()).getFileCount(); - Assert.assertEquals("File count has not matched.", expFileCount, - actFileCount); - } - } - } - - private static long getDataSizeInMB(Path inputDir) throws IOException { - FileSystem fs = inputDir.getFileSystem(conf); - ContentSummary csmry = fs.getContentSummary(inputDir); - long dataSize = csmry.getLength(); - dataSize = dataSize/(1024 * 1024); - return dataSize; - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java deleted file mode 100644 index 883feec88fc..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapred.gridmix.FilePool; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import java.io.IOException; -import java.util.ArrayList; - -public class TestGridMixFilePool { - private static final Log LOG = - LogFactory.getLog(TestGridMixFilePool.class); - private static Configuration conf = new Configuration(); - private static MRCluster cluster; - private static JTProtocol remoteClient; - private static JTClient jtClient; - private static Path gridmixDir; - private static int clusterSize; - - @BeforeClass - public static void before() throws Exception { - String [] excludeExpList = {"java.net.ConnectException", - "java.io.IOException"}; - cluster = MRCluster.createCluster(conf); - cluster.setExcludeExpList(excludeExpList); - cluster.setUp(); - jtClient = cluster.getJTClient(); - remoteClient = jtClient.getProxy(); - clusterSize = cluster.getTTClients().size(); - gridmixDir = new Path("herriot-gridmix"); - UtilsForGridmix.createDirs(gridmixDir, remoteClient.getDaemonConf()); - } - - @AfterClass - public static void after() throws Exception { - UtilsForGridmix.cleanup(gridmixDir, conf); - cluster.tearDown(); - } - - @Test - public void testFilesCountAndSizesForSpecifiedFilePool() throws Exception { - conf = remoteClient.getDaemonConf(); - final long inputSizeInMB = clusterSize * 200; - int [] fileSizesInMB = {50, 100, 400, 50, 300, 10, 60, 40, 20 ,10 , 500}; - long targetSize = Long.MAX_VALUE; - final int expFileCount = clusterSize + 4; - String [] runtimeValues ={"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file:///dev/null"}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - // Generate the input data by using gridmix framework. - int exitCode = - UtilsForGridmix.runGridmixJob(gridmixDir, conf, - GridMixRunMode.DATA_GENERATION.getValue(), - runtimeValues, otherArgs); - Assert.assertEquals("Data generation has failed.", 0 , exitCode); - // Create the files without using gridmix input generation with - // above mentioned sizes in a array. 
- createFiles(new Path(gridmixDir, "input"), fileSizesInMB); - conf.setLong(FilePool.GRIDMIX_MIN_FILE, 100 * 1024 * 1024); - FilePool fpool = new FilePool(conf, new Path(gridmixDir, "input")); - fpool.refresh(); - verifyFilesSizeAndCountForSpecifiedPool(expFileCount, targetSize, fpool); - } - - private void createFiles(Path inputDir, int [] fileSizes) - throws Exception { - for (int size : fileSizes) { - UtilsForGridmix.createFile(size, inputDir, conf); - } - } - - private void verifyFilesSizeAndCountForSpecifiedPool(int expFileCount, - long minFileSize, FilePool pool) throws IOException { - final ArrayList files = new ArrayList(); - long filesSizeInBytes = pool.getInputFiles(minFileSize, files); - long actFilesSizeInMB = filesSizeInBytes / (1024 * 1024); - long expFilesSizeInMB = (clusterSize * 200) + 1300; - Assert.assertEquals("Files Size has not matched for specified pool.", - expFilesSizeInMB, actFilesSizeInMB); - int actFileCount = files.size(); - Assert.assertEquals("File count has not matched.", expFileCount, - actFileCount); - int count = 0; - for (FileStatus fstat : files) { - String fp = fstat.getPath().toString(); - count = count + ((fp.indexOf("datafile_") > 0)? 0 : 1); - } - Assert.assertEquals("Total folders are not matched with cluster size", - clusterSize, count); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java deleted file mode 100644 index 1dfc8970e61..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapred.gridmix.Gridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix generated input if compression emulation turn on. 
- */ -public class TestGridmixCompressedInputGeneration - extends GridmixSystemTestCase { - - private static final Log LOG = - LogFactory.getLog("TestGridmixCompressedInputGeneration.class"); - - /** - * Generate input data and verify whether input files are compressed - * or not. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixCompressionInputGeneration() throws Exception { - final long inputSizeInMB = 1024 * 7; - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file:///dev/null"}; - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true" - }; - LOG.info("Verify the generated compressed input data."); - runAndVerify(true, inputSizeInMB, runtimeValues, otherArgs); - } - - /** - * Disable compression emulation and verify whether input files are - * compressed or not. - * @throws Exception - */ - @Test - public void testGridmixInputGenerationWithoutCompressionEnable() - throws Exception { - UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf()); - final long inputSizeInMB = 1024 * 6; - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file:///dev/null"}; - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - LOG.info("Verify the generated uncompressed input data."); - runAndVerify(false, inputSizeInMB, runtimeValues, otherArgs); - } - - private void runAndVerify(boolean isCompressed, long INPUT_SIZE, - String [] runtimeValues, String [] otherArgs) throws Exception { - int exitCode = - UtilsForGridmix.runGridmixJob(gridmixDir, conf, - GridMixRunMode.DATA_GENERATION.getValue(), - runtimeValues,otherArgs); - Assert.assertEquals("Data generation has failed.", 0, exitCode); - verifyJobStatus(); - verifyInputDataSize(INPUT_SIZE); - verifyInputFiles(isCompressed); - } - - private void verifyInputFiles(boolean isCompressed) throws IOException { - List inputFiles = - getInputFiles(conf, Gridmix.getGridmixInputDataPath(gridmixDir)); - for (String inputFile: inputFiles) { - boolean fileStatus = (inputFile.contains(".gz") - || inputFile.contains(".tgz"))? 
true : false; - if (isCompressed) { - Assert.assertTrue("Compressed input split file was not found.", - fileStatus); - } else { - Assert.assertFalse("Uncompressed input split file was not found.", - fileStatus); - } - } - } - - private void verifyInputDataSize(long INPUT_SIZE) throws IOException { - long actDataSize = - getInputDataSizeInMB(conf, Gridmix.getGridmixInputDataPath(gridmixDir)); - double ratio = ((double)actDataSize)/INPUT_SIZE; - long expDataSize = (long)(INPUT_SIZE * ratio); - Assert.assertEquals("Generated data has not matched with given size.", - expDataSize, actDataSize); - } - - private void verifyJobStatus() throws IOException { - JobClient jobClient = jtClient.getClient(); - int len = jobClient.getAllJobs().length; - LOG.info("Verify the job status after completion of job..."); - Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, - jobClient.getAllJobs()[len -1].getRunState()); - } - - private long getInputDataSizeInMB(Configuration conf, Path inputDir) - throws IOException { - FileSystem fs = inputDir.getFileSystem(conf); - ContentSummary csmry = fs.getContentSummary(inputDir); - long dataSize = csmry.getLength(); - dataSize = dataSize/(1024 * 1024); - return dataSize; - } - - private List getInputFiles(Configuration conf, Path inputDir) - throws IOException { - FileSystem fs = inputDir.getFileSystem(conf); - FileStatus [] listStatus = fs.listStatus(inputDir); - List files = new ArrayList(); - for (FileStatus fileStat : listStatus) { - files.add(getInputFile(fileStat, conf)); - } - return files; - } - - private String getInputFile(FileStatus fstatus, Configuration conf) - throws IOException { - String fileName = null; - if (!fstatus.isDirectory()) { - fileName = fstatus.getPath().getName(); - } else { - FileSystem fs = fstatus.getPath().getFileSystem(conf); - FileStatus [] listStatus = fs.listStatus(fstatus.getPath()); - for (FileStatus fileStat : listStatus) { - return getInputFile(fileStat, conf); - } - } - return fileName; - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java deleted file mode 100644 index 3fdd16d7f6f..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the gridmix jobs compression ratios of map input, - * map output and reduce output with default and user specified - * compression ratios. - * - */ -public class TestGridmixCompressionEmulationWithCompressInput - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestGridmixCompressionEmulationWithCompressInput.class"); - final long inputSizeInMB = 1024 * 6; - - /** - * Generate compressed input data and verify the map input, - * map output and reduce output compression ratios of gridmix jobs - * against the default compression ratios. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixCompressionRatiosAgainstDefaultCompressionRatio() - throws Exception { - final String tracePath = getTraceFile("compression_case1_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Verify map input, map output and reduce output compression ratios of - * gridmix jobs against user specified compression ratios. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixOuputCompressionRatiosAgainstCustomRatios() - throws Exception { - final String tracePath = getTraceFile("compression_case1_trace"); - Assert.assertNotNull("Trace file has not found.", tracePath); - UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf()); - - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true", - "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.68", - "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35", - "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.40" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java deleted file mode 100644 index e6c7e6af46b..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix emulation of HDFS private distributed cache file. - */ -public class TestGridmixEmulationOfHDFSPrivateDCFile - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixEmulationOfHDFSPrivateDCFile.class"); - /** - * Generate input data and single HDFS private distributed cache - * file based on given input trace.Verify the Gridmix emulation of - * single private HDFS distributed cache file in RoundRobinUserResolver - * mode with STRESS submission policy. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGenerateAndEmulateOfHDFSPrivateDCFile() - throws Exception { - final long inputSizeInMB = 8192; - final String tracePath = getTraceFile("distcache_case3_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - /** - * Verify the Gridmix emulation of single HDFS private distributed - * cache file in SubmitterUserResolver mode with REPLAY submission - * policy by using the existing input data and HDFS private - * distributed cache file. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixEmulationOfHDFSPrivateDCFile() - throws Exception { - final String tracePath = getTraceFile("distcache_case3_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues ={"LOADJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - tracePath}; - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java deleted file mode 100644 index 0bf07fdf4d2..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix emulation of HDFS public distributed cache file. 
- */ -public class TestGridmixEmulationOfHDFSPublicDCFile - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixEmulationOfHDFSPublicDCFile.class"); - - /** - * Generate the input data and HDFS distributed cache file based - * on given input trace. Verify the Gridmix emulation of single HDFS - * public distributed cache file in SubmitterUserResolver mode with - * STRESS submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateAndEmulationOfSingleHDFSDCFile() - throws Exception { - final long inputSizeInMB = 7168; - final String tracePath = getTraceFile("distcache_case1_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Verify the Gridmix emulation of Single HDFS public distributed cache - * file in RoundRobinUserResolver mode with REPLAY submission policy - * by using the existing input data and HDFS public distributed cache file. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixEmulationOfSingleHDFSPublicDCFile() - throws Exception { - final String tracePath = getTraceFile("distcache_case1_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java deleted file mode 100644 index b5d821f5009..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.GridmixJob; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the {@link Gridmix} with a high ram jobs trace and - * verify each {@link Gridmix} job whether it honors the high ram or not. - * In the trace the jobs should use the high ram for both maps and reduces. - */ -public class TestGridmixEmulationOfHighRamJobsCase1 - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase1.class"); - - /** - * Generate input data and run {@link Gridmix} with a high ram jobs trace - * as a load job and STRESS submission policy in a SubmitterUserResolver - * mode. Verify each {@link Gridmix} job whether it honors the high ram or not - * after completion of execution. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfHighRamForMapsAndReducesOfMRJobs() - throws Exception { - final long inputSizeInMB = cSize * 400; - String tracePath = getTraceFile("highram_mr_jobs_case1"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"}; - - validateTaskMemoryParamters(tracePath, true); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java deleted file mode 100644 index bfca1f20dc3..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.GridmixJob; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the {@link Gridmix} with a high ram jobs trace and - * verify each {@link Gridmix} job whether it honors the high ram or not. - * In the trace the jobs should use the high ram only for maps. - */ -public class TestGridmixEmulationOfHighRamJobsCase2 - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase2.class"); - - /** - * Generate input data and run {@link Gridmix} with a high ram jobs trace - * as a load job and REPALY submission policy in a RoundRobinUserResolver - * mode. Verify each {@link Gridmix} job whether it honors the high ram or not - * after completion of execution. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfHighRamForMapsOfMRJobs() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("highram_mr_jobs_case2"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"}; - - validateTaskMemoryParamters(tracePath, true); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java deleted file mode 100644 index bc5e3aafd12..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.GridmixJob; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the {@link Gridmix} with a high ram jobs trace and - * verify each {@link Gridmix} job whether it honors the high ram or not. - * In the trace the jobs should use the high ram only for reducers. - */ -public class TestGridmixEmulationOfHighRamJobsCase3 - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase3.class); - - /** - * Generate input data and run {@link Gridmix} with a high ram jobs trace - * as a load job and SERIAL submission policy in a SubmitterUserResolver - * mode. Verify each {@link Gridmix} job whether it honors the - * high ram or not after completion of execution. - * @throws Exception - if an error occurs. - */ - @Test - public void testEmulationOfHighRamForReducersOfMRJobs() - throws Exception { - final long inputSizeInMB = cSize * 250; - String tracePath = getTraceFile("highram_mr_jobs_case3"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"}; - - validateTaskMemoryParamters(tracePath, true); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java deleted file mode 100644 index 5f464ce39be..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; - -/** - * Verify the Gridmix emulation of Multiple HDFS private distributed - * cache files. - */ -public class TestGridmixEmulationOfMultipleHDFSPrivateDCFiles - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.class"); - - /** - * Generate input data and multiple HDFS private distributed cache - * files based on given input trace.Verify the Gridmix emulation of - * multiple private HDFS distributed cache files in RoundRobinUserResolver - * mode with SERIAL submission policy. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateAndEmulationOfMultipleHDFSPrivateDCFiles() - throws Exception { - final long inputSize = 6144; - final String tracePath = getTraceFile("distcache_case4_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "SERIAL", - inputSize+"m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Verify the Gridmix emulation of multiple HDFS private distributed - * cache files in SubmitterUserResolver mode with STRESS submission - * policy by using the existing input data and HDFS private - * distributed cache files. - * @throws Exception - if an error occurs. - */ - @Test - public void testGridmixEmulationOfMultipleHDFSPrivateDCFiles() - throws Exception { - final String tracePath = getTraceFile("distcache_case4_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - tracePath}; - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java deleted file mode 100644 index cca5da83ecb..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.Test; -import java.io.IOException; - -/** - * Verify the Gridmix emulation of Multiple HDFS public distributed - * cache files. - */ -public class TestGridmixEmulationOfMultipleHDFSPublicDCFiles - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog( - "TestGridmixEmulationOfMultipleHDFSPublicDCFiles.class"); - - /** - * Generate the compressed input data and dist cache files based - * on input trace. Verify the Gridmix emulation of - * multiple HDFS public distributed cache file. - * @throws Exception - if an error occurs. - */ - @Test - public void testGenerateAndEmulationOfMultipleHDFSDCFiles() - throws Exception { - final long inputSizeInMB = 7168; - final String tracePath = getTraceFile("distcache_case2_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - final String [] otherArgs = { - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Verify the Gridmix emulation of Single HDFS public distributed cache file - * by using an existing input compressed data and HDFS dist cache file. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixEmulationOfMulitpleHDFSPublicDCFile() - throws Exception { - final String tracePath = getTraceFile("distcache_case2_trace"); - Assert.assertNotNull("Trace file was not found.", tracePath); - final String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - tracePath}; - - final String [] otherArgs = { - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.RUN_GRIDMIX.getValue()); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java deleted file mode 100644 index ec11a2b36e6..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; - -/** - * Run the Gridmix with 10 minutes MR jobs trace and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith10minTrace extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixWith10minTrace.class); - - /** - * Generate data and run gridmix by sleep jobs with STRESS submission - * policy in a RoundRobinUserResolver mode against 10 minutes trace file. - * Verify each Gridmix job history with a corresponding job story - * in a trace file after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith10minTrace() throws Exception { - final long inputSizeInMB = cSize * 250; - final long minFileSize = 200 * 1024 * 1024; - String [] runtimeValues = - {"SLEEPJOB", - RoundRobinUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - map.get("10m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize, - "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false", - "-D", GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY + "=true", - "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10" - }; - String tracePath = map.get("10m"); - runGridmixAndVerify(runtimeValues, otherArgs,tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java deleted file mode 100644 index 9bcb45a3fbb..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; - -/** - * Run the Gridmix with 12 minutes MR job traces and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith12minTrace extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixWith12minTrace.class); - - /** - * Generate data and run gridmix sleep jobs with REPLAY submission - * policy in a SubmitterUserResolver mode against 12 minutes trace file. - * Verify each Gridmix job history with a corresponding job story - * in a trace file after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith12minTrace() throws Exception { - final long inputSizeInMB = cSize * 150; - String [] runtimeValues = {"SLEEPJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - map.get("12m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10", - "-D", GridMixConfig.GRIDMIX_SLEEP_REDUCE_MAX_TIME + "=5" - }; - - String tracePath = map.get("12m"); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java deleted file mode 100644 index c583e6d3a29..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.junit.Test; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; - -/** - * Run the Gridmix with 1 minute MR jobs trace and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith1minTrace extends GridmixSystemTestCase{ - private static final Log LOG = - LogFactory.getLog(TestGridmixWith1minTrace.class); - - /** - * Generate data and run gridmix by load job with STRESS submission policy - * in a SubmitterUserResolver mode against 1 minute trace file. - * Verify each Gridmix job history with a corresponding job story in the - * trace after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith1minTrace() throws Exception { - final long inputSizeInMB = cSize * 400; - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - map.get("1m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - String tracePath = map.get("1m"); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java deleted file mode 100644 index d9fb7c70f7f..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the Gridmix with 2 minutes job trace which has been generated with - * streaming jobs histories and verify each job history against - * the corresponding job story in a given trace file. - */ -public class TestGridmixWith2minStreamingJobTrace - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixWith2minStreamingJobTrace.class"); - - /** - * Generate input data and run Gridmix by load job with STRESS submission - * policy in a SubmitterUserResolver mode against 2 minutes job - * trace file of streaming jobs. Verify each Gridmix job history with - * a corresponding job story in a trace file after completion of all - * the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith2minStreamJobTrace() throws Exception { - final long inputSizeInMB = cSize * 250; - final long minFileSize = 150 * 1024 * 1024; - String tracePath = getTraceFile("2m_stream"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true", - "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java deleted file mode 100644 index 85dedf6675f..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.junit.Assert; -import org.junit.Test; - -/** - * Run the Gridmix with 3 minutes job trace which has been generated with - * streaming jobs histories and verify each job history against - * corresponding job story in a given trace file. - */ -public class TestGridmixWith3minStreamingJobTrace - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixWith3minStreamingJobTrace.class"); - - /** - * Generate input data and run gridmix by load job with REPLAY submission - * policy in a RoundRobinUserResolver mode against 3 minutes job trace file - * of streaming job. Verify each gridmix job history with a corresponding - * job story in a trace file after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith3minStreamJobTrace() throws Exception { - final long inputSizeInMB = cSize * 200; - final long bytesPerFile = 150 * 1024 * 1024; - String tracePath = getTraceFile("3m_stream"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true", - "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java deleted file mode 100644 index 5f2171fb401..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; - -/** - * Run the Gridmix with 3 minutes MR jobs trace and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith3minTrace extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixWith3minTrace.class); - - /** - * Generate data and run gridmix by load job with REPLAY submission - * policy in a RoundRobinUserResolver mode by using 3 minutes trace file. - * Verify each Gridmix job history with a corresponding job story in - * a trace after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith3minTrace() throws Exception { - final long inputSizeInMB = cSize * 200; - String [] runtimeValues = - {"LOADJOB", - RoundRobinUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - map.get("3m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - String tracePath = map.get("3m"); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java deleted file mode 100644 index ef1878c0855..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Run the Gridmix with 5 minutes job trace which has been generated with - * streaming jobs histories and verify each job history against - * corresponding job story in a given trace file. - */ -public class TestGridmixWith5minStreamingJobTrace - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestGridmixWith5minStreamingJobTrace.class"); - - /** - * Generate input data and run gridmix by load job with SERIAL submission - * policy in a SubmitterUserResolver mode against 5 minutes job trace file - * of streaming job. Verify each gridmix job history with a corresponding - * job story in a trace file after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith5minStreamJobTrace() throws Exception { - String tracePath = getTraceFile("5m_stream"); - Assert.assertNotNull("Trace file has not found.", tracePath); - final long inputSizeInMB = cSize * 200; - final long bytesPerFile = 150 * 1024 * 1024; - String [] runtimeValues = {"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_KEY_FRC + "=0.5f", - "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false" - }; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java deleted file mode 100644 index c55167e3b4f..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; - -/** - * Run the Gridmix with 5 minutes MR jobs trace and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith5minTrace extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixWith5minTrace.class); - - /** - * Generate data and run gridmix by load job with SERIAL submission - * policy in a SubmitterUserResolver mode against 5 minutes trace file. - * Verify each Gridmix job history with a corresponding job story - * in a trace file after completion of all the jobs. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith5minTrace() throws Exception { - final long inputSizeInMB = cSize * 300; - final long minFileSize = 100 * 1024 * 1024; - String [] runtimeValues ={"LOADJOB", - SubmitterUserResolver.class.getName(), - "SERIAL", - inputSizeInMB + "m", - map.get("5m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize - }; - - String tracePath = map.get("5m"); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java deleted file mode 100644 index 55be37b17dd..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.junit.Test; - -/** - * Run the Gridmix with 7 minutes MR jobs trace and - * verify each job history against the corresponding job story - * in a given trace file. - */ -public class TestGridmixWith7minTrace extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog(TestGridmixWith7minTrace.class); - - /** - * Generate data and run gridmix by sleep job with STRESS submission - * policy in a SubmitterUserResolver mode against 7 minute trace file. - * Verify each Gridmix job history with a corresponding job story - * in a trace file after completion of all the jobs execution. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testGridmixWith7minTrace() throws Exception { - final long inputSizeInMB = cSize * 400; - final long minFileSize = 200 * 1024 * 1024; - String [] runtimeValues ={"SLEEPJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - map.get("7m")}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize, - "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false" - }; - String tracePath = map.get("7m"); - runGridmixAndVerify(runtimeValues, otherArgs, tracePath); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java deleted file mode 100644 index a82e806059b..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} memory emulation feature for the jobs with - * custom progress interval, different input data, submission policies - * and user resolver modes. Verify the total heap usage of map and reduce - * tasks of the jobs with corresponding original job in the trace. - */ -public class TestMemEmulForMapsAndReducesWithCustomIntrvl - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestMemEmulForMapsAndReducesWithCustomIntrvl.class"); - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * memory emulation with custom progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs - * with corresponding original job in the trace. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testMemoryEmulationForReducesWithCompressedInputCase7() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * memory emulation with custom progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs - * with corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForReducesWithUncompressedInputCase8() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java deleted file mode 100644 index e1f211a11f3..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} memory emulation feature for gridmix jobs - * with default progress interval, different input data, submission - * policies and user resolver modes. Verify the total heap usage of - * map and reduce tasks of the jobs with corresponding original - * job in the trace. - */ -public class TestMemEmulForMapsAndReducesWithDefaultIntrvl - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestMemEmulForMapsAndReducesWithDefaultIntrvl.class"); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * memory emulation with default progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs - * with corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForReducesWithCompressedInputCase5() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * memory emulation with default progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps and reduces total heap memory usage of {@link Gridmix} jobs - * with corresponding original job in the trace. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testMemoryEmulationForReducesWithUncompressedInputCase6() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "REPLAY", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java deleted file mode 100644 index da48ad4538e..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs - * with default progress interval, custom heap memory ratio, different input - * data, submission policies and user resolver modes. Verify the total heap - * usage of map and reduce tasks of the jobs with corresponding the original job - * in the trace. - */ -public class TestMemEmulForMapsWithCustomHeapMemoryRatio - extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestMemEmulForMapsWithCustomHeapMemoryRatio.class"); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * memory emulation. The {@link Gridmix} should use the following runtime - * parameters while running the jobs. 
- * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify total heap memory usage of the tasks of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForMapsWithCompressedInputCase1() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.5F"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * memory emulation. The {@link Gridmix} should use the following runtime - * parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver - * Verify total heap memory usage of tasks of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForMapsWithUncompressedInputCase2() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("mem_emul_case2"); - Assert.assertNotNull("Trace file has not found.", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false", - "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.4F"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java deleted file mode 100644 index 5d1d452b8d6..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs - * with custom progress interval, different input data, submission policies - * and user resolver modes. Verify the total heap usage of map tasks of - * the jobs with corresponding the original job in the trace. - */ -public class TestMemEmulForMapsWithCustomIntrvl extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestMemEmulForMapsWithCustomIntrvl.class"); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * memory emulation with custom progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps total heap memory usage of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForMapsWithCompressedInputCase3() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("mem_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * memory emulation with custom progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver - * Verify maps total heap memory usage of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. 
- */ - @Test - public void testMemoryEmulationForMapsWithUncompressedInputCase4() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("mem_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F", - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java deleted file mode 100644 index ff136b89c7d..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig; -import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode; -import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Test; -import org.junit.Assert; - -/** - * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs - * with default progress interval, different input data, submission policies - * and user resolver modes. Verify the total heap usage of map tasks of the - * jobs with corresponding original job in the trace. - */ -public class TestMemEmulForMapsWithDefaultIntrvl extends GridmixSystemTestCase { - private static final Log LOG = - LogFactory.getLog("TestMemEmulForMapsWithDefaultIntrvl.class"); - - /** - * Generate compressed input and run {@link Gridmix} by turning on the - * memory emulation with default progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. 
- * Submission Policy : STRESS, User Resolver Mode : SumitterUserResolver - * Verify maps total heap memory usage of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForMapsWithCompressedInputCase1() - throws Exception { - final long inputSizeInMB = 1024 * 7; - String tracePath = getTraceFile("mem_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - SubmitterUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } - - /** - * Generate uncompressed input and run {@link Gridmix} by turning on the - * memory emulation with default progress interval. The {@link Gridmix} - * should use the following runtime parameters while running the jobs. - * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver - * Verify maps total heap memory usage of {@link Gridmix} jobs with - * corresponding original job in the trace. - * @throws Exception - if an error occurs. - */ - @Test - public void testMemoryEmulationForMapsWithUncompressedInputCase2() - throws Exception { - final long inputSizeInMB = cSize * 300; - String tracePath = getTraceFile("mem_emul_case1"); - Assert.assertNotNull("Trace file not found!", tracePath); - String [] runtimeValues = - { "LOADJOB", - RoundRobinUserResolver.class.getName(), - "STRESS", - inputSizeInMB + "m", - "file://" + UtilsForGridmix.getProxyUsersFile(conf), - tracePath}; - - String [] otherArgs = { - "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + - GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN, - "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", - "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", - "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"}; - - runGridmixAndVerify(runtimeValues, otherArgs, tracePath, - GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java deleted file mode 100644 index fc99162bd86..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java +++ /dev/null @@ -1,285 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix.test.system; - -import org.apache.hadoop.mapred.gridmix.Gridmix; -import org.apache.hadoop.mapred.gridmix.JobCreator; -import org.apache.hadoop.mapred.gridmix.SleepJob; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*; - -/** - * Gridmix system tests configurations. - */ -public class GridMixConfig { - - /** - * Gridmix original job id. - */ - public static final String GRIDMIX_ORIGINAL_JOB_ID = Gridmix.ORIGINAL_JOB_ID; - - /** - * Gridmix output directory. - */ - public static final String GRIDMIX_OUTPUT_DIR = Gridmix.GRIDMIX_OUT_DIR; - - /** - * Gridmix job type (LOADJOB/SLEEPJOB). - */ - public static final String GRIDMIX_JOB_TYPE = JobCreator.GRIDMIX_JOB_TYPE; - - /** - * Gridmix submission use queue. - */ - /* In Gridmix package the visibility of below mentioned - properties are protected and it have not visible outside - the package. However,it should required for system tests, - so it's re-defining in system tests config file.*/ - public static final String GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE = - "gridmix.job-submission.use-queue-in-trace"; - - /** - * Gridmix user resolver(RoundRobinUserResolver/ - * SubmitterUserResolver/EchoUserResolver). - */ - public static final String GRIDMIX_USER_RESOLVER = Gridmix.GRIDMIX_USR_RSV; - - /** - * Gridmix queue depth. - */ - public static final String GRIDMIX_QUEUE_DEPTH = Gridmix.GRIDMIX_QUE_DEP; - - /* In Gridmix package the visibility of below mentioned - property is protected and it should not available for - outside the package. However,it should required for - system tests, so it's re-defining in system tests config file.*/ - /** - * Gridmix generate bytes per file. - */ - public static final String GRIDMIX_BYTES_PER_FILE = - "gridmix.gen.bytes.per.file"; - - /** - * Gridmix job submission policy(STRESS/REPLAY/SERIAL). - */ - - public static final String GRIDMIX_SUBMISSION_POLICY = - "gridmix.job-submission.policy"; - - /** - * Gridmix minimum file size. - */ - public static final String GRIDMIX_MINIMUM_FILE_SIZE = - "gridmix.min.file.size"; - - /** - * Gridmix key fraction. - */ - public static final String GRIDMIX_KEY_FRC = - "gridmix.key.fraction"; - - /** - * Gridmix compression enable - */ - public static final String GRIDMIX_COMPRESSION_ENABLE = - "gridmix.compression-emulation.enable"; - /** - * Gridmix distcache enable - */ - public static final String GRIDMIX_DISTCACHE_ENABLE = - "gridmix.distributed-cache-emulation.enable"; - - /** - * Gridmix input decompression enable. - */ - public static final String GRIDMIX_INPUT_DECOMPRESS_ENABLE = - "gridmix.compression-emulation.input-decompression.enable"; - - /** - * Gridmix input compression ratio. - */ - public static final String GRIDMIX_INPUT_COMPRESS_RATIO = - "gridmix.compression-emulation.map-input.decompression-ratio"; - - /** - * Gridmix intermediate compression ratio. - */ - public static final String GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO = - "gridmix.compression-emulation.map-output.compression-ratio"; - - /** - * Gridmix output compression ratio. - */ - public static final String GRIDMIX_OUTPUT_COMPRESSION_RATIO = - "gridmix.compression-emulation.reduce-output.compression-ratio"; - - /** - * Gridmix distributed cache visibilities. 
- */ - public static final String GRIDMIX_DISTCACHE_VISIBILITIES = - MRJobConfig.CACHE_FILE_VISIBILITIES; - - /** - * Gridmix distributed cache files. - */ - public static final String GRIDMIX_DISTCACHE_FILES = - MRJobConfig.CACHE_FILES; - - /** - * Gridmix distributed cache files size. - */ - public static final String GRIDMIX_DISTCACHE_FILESSIZE = - MRJobConfig.CACHE_FILES_SIZES; - - /** - * Gridmix distributed cache files time stamp. - */ - public static final String GRIDMIX_DISTCACHE_TIMESTAMP = - MRJobConfig.CACHE_FILE_TIMESTAMPS; - - /** - * Gridmix logger mode. - */ - public static final String GRIDMIX_LOG_MODE = - "log4j.logger.org.apache.hadoop.mapred.gridmix"; - - /** - * Gridmix sleep job map task only. - */ - public static final String GRIDMIX_SLEEPJOB_MAPTASK_ONLY = - SleepJob.SLEEPJOB_MAPTASK_ONLY; - - /** - * Gridmix sleep map maximum time. - */ - public static final String GRIDMIX_SLEEP_MAP_MAX_TIME = - SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME; - - /** - * Gridmix sleep reduce maximum time. - */ - public static final String GRIDMIX_SLEEP_REDUCE_MAX_TIME = - SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME; - - /** - * Gridmix high ram job emulation enable. - */ - public static final String GRIDMIX_HIGH_RAM_JOB_ENABLE = - "gridmix.highram-emulation.enable"; - - /** - * Job map memory in mb. - */ - public static final String JOB_MAP_MEMORY_MB = - MRJobConfig.MAP_MEMORY_MB; - - /** - * Job reduce memory in mb. - */ - public static final String JOB_REDUCE_MEMORY_MB = - MRJobConfig.REDUCE_MEMORY_MB; - - /** - * Cluster map memory in mb. - */ - public static final String CLUSTER_MAP_MEMORY = - MRConfig.MAPMEMORY_MB; - - /** - * Cluster reduce memory in mb. - */ - public static final String CLUSTER_REDUCE_MEMORY = - MRConfig.REDUCEMEMORY_MB; - - /** - * Cluster maximum map memory. - */ - public static final String CLUSTER_MAX_MAP_MEMORY = - JTConfig.JT_MAX_MAPMEMORY_MB; - - /** - * Cluster maximum reduce memory. - */ - public static final String CLUSTER_MAX_REDUCE_MEMORY = - JTConfig.JT_MAX_REDUCEMEMORY_MB; - - /** - * Gridmix cpu emulation. - */ - public static final String GRIDMIX_CPU_EMULATON = - ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS; - - /** - * Gridmix cpu usage emulation plugin. - */ - public static final String GRIDMIX_CPU_USAGE_PLUGIN = - CumulativeCpuUsageEmulatorPlugin.class.getName(); - - /** - * Gridmix cpu emulation custom interval. - */ - public static final String GRIDMIX_CPU_CUSTOM_INTERVAL = - CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL; - - /** - * Gridmix cpu emulation lower limit. - */ - public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55; - - /** - * Gridmix cpu emulation upper limit. - */ - public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130; - - /** - * Gridmix heap memory custom interval - */ - public static final String GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL = - TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL; - - /** - * Gridmix heap free memory ratio - */ - public static final String GRIDMIX_HEAP_FREE_MEMORY_RATIO = - TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO; - - /** - * Gridmix memory emulation plugin - */ - public static final String GRIDMIX_MEMORY_EMULATION_PLUGIN = - TotalHeapUsageEmulatorPlugin.class.getName(); - - /** - * Gridmix memory emulation - */ - public static final String GRIDMIX_MEMORY_EMULATON = - ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS; - - /** - * Gridmix memory emulation lower limit. 
- */ - public static int GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT = 55; - - /** - * Gridmix memory emulation upper limit. - */ - public static int GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT = 130; - -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java deleted file mode 100644 index 0abfc5ce00a..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix.test.system; -/** - * Gridmix run modes. - * - */ -public enum GridMixRunMode { - DATA_GENERATION(1), RUN_GRIDMIX(2), DATA_GENERATION_AND_RUN_GRIDMIX(3); - private int mode; - - GridMixRunMode (int mode) { - this.mode = mode; - } - - public int getValue() { - return mode; - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java deleted file mode 100644 index ad00f0d9a16..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix.test.system; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.tools.rumen.ZombieJobProducer; -import org.apache.hadoop.tools.rumen.ZombieJob; -import org.apache.hadoop.conf.Configuration; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -/** - * Build the job stories with a given trace file. - */ -public class GridmixJobStory { - private static Log LOG = LogFactory.getLog(GridmixJobStory.class); - private Path path; - private Map zombieJobs; - private Configuration conf; - - public GridmixJobStory(Path path, Configuration conf) { - this.path = path; - this.conf = conf; - try { - zombieJobs = buildJobStories(); - if(zombieJobs == null) { - throw new NullPointerException("No jobs found in a " - + " given trace file."); - } - } catch (IOException ioe) { - LOG.warn("Error:" + ioe.getMessage()); - } catch (NullPointerException npe) { - LOG.warn("Error:" + npe.getMessage()); - } - } - - /** - * Get the zombie jobs as a map. - * @return the zombie jobs map. - */ - public Map getZombieJobs() { - return zombieJobs; - } - - /** - * Get the zombie job of a given job id. - * @param jobId - gridmix job id. - * @return - the zombie job object. - */ - public ZombieJob getZombieJob(JobID jobId) { - return zombieJobs.get(jobId); - } - - private Map buildJobStories() throws IOException { - ZombieJobProducer zjp = new ZombieJobProducer(path,null, conf); - Map hm = new HashMap(); - ZombieJob zj = zjp.getNextJob(); - while (zj != null) { - hm.put(zj.getJobID(),zj); - zj = zjp.getNextJob(); - } - if (hm.size() == 0) { - return null; - } else { - return hm; - } - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java deleted file mode 100644 index 6a5699eb048..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred.gridmix.test.system; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.junit.Assert; - -/** - * Submit the gridmix jobs. 
- */ -public class GridmixJobSubmission { - private static final Log LOG = - LogFactory.getLog(GridmixJobSubmission.class); - private int gridmixJobCount; - private Configuration conf; - private Path gridmixDir; - private JTClient jtClient; - - public GridmixJobSubmission(Configuration conf, JTClient jtClient , - Path gridmixDir) { - this.conf = conf; - this.jtClient = jtClient; - this.gridmixDir = gridmixDir; - } - - /** - * Submit the gridmix jobs. - * @param runtimeArgs - gridmix common runtime arguments. - * @param otherArgs - gridmix other runtime arguments. - * @param traceInterval - trace time interval. - * @throws Exception - */ - public void submitJobs(String [] runtimeArgs, - String [] otherArgs, int mode) throws Exception { - int prvJobCount = jtClient.getClient().getAllJobs().length; - int exitCode = -1; - if (otherArgs == null) { - exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, - mode, runtimeArgs); - } else { - exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, mode, - runtimeArgs, otherArgs); - } - Assert.assertEquals("Gridmix jobs have failed.", 0 , exitCode); - gridmixJobCount = jtClient.getClient().getAllJobs().length - prvJobCount; - } - - /** - * Get the submitted jobs count. - * @return count of no. of jobs submitted for a trace. - */ - public int getGridmixJobCount() { - return gridmixJobCount; - } - - /** - * Get the job configuration. - * @return Configuration of a submitted job. - */ - public Configuration getJobConf() { - return conf; - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java deleted file mode 100644 index e448412a39f..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java +++ /dev/null @@ -1,1166 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix.test.system; - -import java.io.IOException; -import java.io.File; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.HashMap; -import java.util.SortedMap; -import java.util.TreeMap; -import java.util.Collections; -import java.util.Set; -import java.util.ArrayList; -import java.util.Arrays; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.TaskCounter; -import org.apache.hadoop.mapreduce.Counters; -import org.apache.hadoop.mapreduce.Counter; -import org.apache.hadoop.mapreduce.CounterGroup; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.tools.rumen.LoggedJob; -import org.apache.hadoop.tools.rumen.ZombieJob; -import org.apache.hadoop.tools.rumen.TaskInfo; -import org.junit.Assert; -import java.text.ParseException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.mapred.gridmix.GridmixSystemTestCase; - -/** - * Verifying each Gridmix job with corresponding job story in a trace file. - */ -public class GridmixJobVerification { - - private static Log LOG = LogFactory.getLog(GridmixJobVerification.class); - private Path path; - private Configuration conf; - private JTClient jtClient; - private String userResolverVal; - static final String origJobIdKey = GridMixConfig.GRIDMIX_ORIGINAL_JOB_ID; - static final String jobSubKey = GridMixConfig.GRIDMIX_SUBMISSION_POLICY; - static final String jobTypeKey = GridMixConfig.GRIDMIX_JOB_TYPE; - static final String mapTaskKey = GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY; - static final String usrResolver = GridMixConfig.GRIDMIX_USER_RESOLVER; - static final String fileOutputFormatKey = FileOutputFormat.COMPRESS; - static final String fileInputFormatKey = FileInputFormat.INPUT_DIR; - static final String compEmulKey = GridMixConfig.GRIDMIX_COMPRESSION_ENABLE; - static final String inputDecompKey = - GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE; - static final String mapInputCompRatio = - GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO; - static final String mapOutputCompRatio = - GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO; - static final String reduceOutputCompRatio = - GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO; - private Map> simuAndOrigJobsInfo = - new HashMap>(); - - /** - * Gridmix job verification constructor - * @param path - path of the gridmix output directory. - * @param conf - cluster configuration. - * @param jtClient - jobtracker client. - */ - public GridmixJobVerification(Path path, Configuration conf, - JTClient jtClient) { - this.path = path; - this.conf = conf; - this.jtClient = jtClient; - } - - /** - * It verifies the Gridmix jobs with corresponding job story in a trace file. - * @param jobids - gridmix job ids. - * @throws IOException - if an I/O error occurs. - * @throws ParseException - if an parse error occurs. 
- */ - public void verifyGridmixJobsWithJobStories(List jobids) - throws Exception { - - SortedMap origSubmissionTime = new TreeMap (); - SortedMap simuSubmissionTime = new TreeMap(); - GridmixJobStory gjs = new GridmixJobStory(path, conf); - final Iterator ite = jobids.iterator(); - File destFolder = new File(System.getProperty("java.io.tmpdir") - + "/gridmix-st/"); - destFolder.mkdir(); - while (ite.hasNext()) { - JobID simuJobId = ite.next(); - JobHistoryParser.JobInfo jhInfo = getSimulatedJobHistory(simuJobId); - Assert.assertNotNull("Job history not found.", jhInfo); - Counters counters = jhInfo.getTotalCounters(); - JobConf simuJobConf = getSimulatedJobConf(simuJobId, destFolder); - String origJobId = simuJobConf.get(origJobIdKey); - LOG.info("OriginalJobID<->CurrentJobID:" - + origJobId + "<->" + simuJobId); - - if (userResolverVal == null) { - userResolverVal = simuJobConf.get(usrResolver); - } - ZombieJob zombieJob = gjs.getZombieJob(JobID.forName(origJobId)); - Map mapJobCounters = getJobMapCounters(zombieJob); - Map reduceJobCounters = getJobReduceCounters(zombieJob); - if (simuJobConf.get(jobSubKey).contains("REPLAY")) { - origSubmissionTime.put(zombieJob.getSubmissionTime(), - origJobId.toString() + "^" + simuJobId); - simuSubmissionTime.put(jhInfo.getSubmitTime() , - origJobId.toString() + "^" + simuJobId); ; - } - - LOG.info("Verifying the job <" + simuJobId + "> and wait for a while..."); - verifySimulatedJobSummary(zombieJob, jhInfo, simuJobConf); - verifyJobMapCounters(counters, mapJobCounters, simuJobConf); - verifyJobReduceCounters(counters, reduceJobCounters, simuJobConf); - verifyCompressionEmulation(zombieJob.getJobConf(), simuJobConf, counters, - reduceJobCounters, mapJobCounters); - verifyDistributeCache(zombieJob,simuJobConf); - setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf, - zombieJob.getJobConf()); - verifyHighRamMemoryJobs(zombieJob, simuJobConf); - verifyCPUEmulationOfJobs(zombieJob, jhInfo, simuJobConf); - verifyMemoryEmulationOfJobs(zombieJob, jhInfo, simuJobConf); - LOG.info("Done."); - } - verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo); - } - - /** - * Verify the job submission order between the jobs in replay mode. - * @param origSubmissionTime - sorted map of original jobs submission times. - * @param simuSubmissionTime - sorted map of simulated jobs submission times. - */ - public void verifyJobSumissionTime(SortedMap origSubmissionTime, - SortedMap simuSubmissionTime) { - Assert.assertEquals("Simulated job's submission time count has " - + "not match with Original job's submission time count.", - origSubmissionTime.size(), simuSubmissionTime.size()); - for ( int index = 0; index < origSubmissionTime.size(); index ++) { - String origAndSimuJobID = origSubmissionTime.get(index); - String simuAndorigJobID = simuSubmissionTime.get(index); - Assert.assertEquals("Simulated jobs have not submitted in same " - + "order as original jobs submitted in REPLAY mode.", - origAndSimuJobID, simuAndorigJobID); - } - } - - /** - * It verifies the simulated job map counters. - * @param counters - Original job map counters. - * @param mapJobCounters - Simulated job map counters. - * @param jobConf - Simulated job configuration. - * @throws ParseException - If an parser error occurs. 
- */ - public void verifyJobMapCounters(Counters counters, - Map mapCounters, JobConf jobConf) throws ParseException { - if (!jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) { - Assert.assertEquals("Map input records have not matched.", - mapCounters.get("MAP_INPUT_RECS").longValue(), - getCounterValue(counters, "MAP_INPUT_RECORDS")); - } else { - Assert.assertTrue("Map Input Bytes are zero", - getCounterValue(counters,"HDFS_BYTES_READ") != 0); - Assert.assertNotNull("Map Input Records are zero", - getCounterValue(counters, "MAP_INPUT_RECORDS")!=0); - } - } - - /** - * It verifies the simulated job reduce counters. - * @param counters - Original job reduce counters. - * @param reduceCounters - Simulated job reduce counters. - * @param jobConf - simulated job configuration. - * @throws ParseException - if an parser error occurs. - */ - public void verifyJobReduceCounters(Counters counters, - Map reduceCounters, JobConf jobConf) throws ParseException { - if (jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) { - Assert.assertTrue("Reduce output records are not zero for sleep job.", - getCounterValue(counters, "REDUCE_OUTPUT_RECORDS") == 0); - Assert.assertTrue("Reduce output bytes are not zero for sleep job.", - getCounterValue(counters,"HDFS_BYTES_WRITTEN") == 0); - } - } - - /** - * It verifies the gridmix simulated job summary. - * @param zombieJob - Original job summary. - * @param jhInfo - Simulated job history info. - * @param jobConf - simulated job configuration. - * @throws IOException - if an I/O error occurs. - */ - public void verifySimulatedJobSummary(ZombieJob zombieJob, - JobHistoryParser.JobInfo jhInfo, JobConf jobConf) throws IOException { - Assert.assertEquals("Job id has not matched", zombieJob.getJobID(), - JobID.forName(jobConf.get(origJobIdKey))); - - Assert.assertEquals("Job maps have not matched", zombieJob.getNumberMaps(), - jhInfo.getTotalMaps()); - - if (!jobConf.getBoolean(mapTaskKey, false)) { - Assert.assertEquals("Job reducers have not matched", - zombieJob.getNumberReduces(), jhInfo.getTotalReduces()); - } else { - Assert.assertEquals("Job reducers have not matched", - 0, jhInfo.getTotalReduces()); - } - - Assert.assertEquals("Job status has not matched.", - zombieJob.getOutcome().name(), - convertJobStatus(jhInfo.getJobStatus())); - - LoggedJob loggedJob = zombieJob.getLoggedJob(); - Assert.assertEquals("Job priority has not matched.", - loggedJob.getPriority().toString(), - jhInfo.getPriority()); - - if (jobConf.get(usrResolver).contains("RoundRobin")) { - String user = UserGroupInformation.getLoginUser().getShortUserName(); - Assert.assertTrue(jhInfo.getJobId().toString() - + " has not impersonate with other user.", - !jhInfo.getUsername().equals(user)); - } - } - - /** - * Get the original job map counters from a trace. - * @param zombieJob - Original job story. - * @return - map counters as a map. 
- */ - public Map getJobMapCounters(ZombieJob zombieJob) { - long expMapInputBytes = 0; - long expMapOutputBytes = 0; - long expMapInputRecs = 0; - long expMapOutputRecs = 0; - Map mapCounters = new HashMap(); - for (int index = 0; index < zombieJob.getNumberMaps(); index ++) { - TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index); - expMapInputBytes += mapTask.getInputBytes(); - expMapOutputBytes += mapTask.getOutputBytes(); - expMapInputRecs += mapTask.getInputRecords(); - expMapOutputRecs += mapTask.getOutputRecords(); - } - mapCounters.put("MAP_INPUT_BYTES", expMapInputBytes); - mapCounters.put("MAP_OUTPUT_BYTES", expMapOutputBytes); - mapCounters.put("MAP_INPUT_RECS", expMapInputRecs); - mapCounters.put("MAP_OUTPUT_RECS", expMapOutputRecs); - return mapCounters; - } - - /** - * Get the original job reduce counters from a trace. - * @param zombieJob - Original job story. - * @return - reduce counters as a map. - */ - public Map getJobReduceCounters(ZombieJob zombieJob) { - long expReduceInputBytes = 0; - long expReduceOutputBytes = 0; - long expReduceInputRecs = 0; - long expReduceOutputRecs = 0; - Map reduceCounters = new HashMap(); - for (int index = 0; index < zombieJob.getNumberReduces(); index ++) { - TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index); - expReduceInputBytes += reduceTask.getInputBytes(); - expReduceOutputBytes += reduceTask.getOutputBytes(); - expReduceInputRecs += reduceTask.getInputRecords(); - expReduceOutputRecs += reduceTask.getOutputRecords(); - } - reduceCounters.put("REDUCE_INPUT_BYTES", expReduceInputBytes); - reduceCounters.put("REDUCE_OUTPUT_BYTES", expReduceOutputBytes); - reduceCounters.put("REDUCE_INPUT_RECS", expReduceInputRecs); - reduceCounters.put("REDUCE_OUTPUT_RECS", expReduceOutputRecs); - return reduceCounters; - } - - /** - * Get the simulated job configuration of a job. - * @param simulatedJobID - Simulated job id. - * @param tmpJHFolder - temporary job history folder location. - * @return - simulated job configuration. - * @throws IOException - If an I/O error occurs. - */ - public JobConf getSimulatedJobConf(JobID simulatedJobID, File tmpJHFolder) - throws IOException{ - FileSystem fs = null; - try { - - String historyFilePath = - jtClient.getProxy().getJobHistoryLocationForRetiredJob(simulatedJobID); - Path jhpath = new Path(historyFilePath); - fs = jhpath.getFileSystem(conf); - fs.copyToLocalFile(jhpath,new Path(tmpJHFolder.toString())); - String historyPath = - historyFilePath.substring(0,historyFilePath.lastIndexOf("_")); - fs.copyToLocalFile(new Path(historyPath + "_conf.xml"), - new Path(tmpJHFolder.toString())); - JobConf jobConf = new JobConf(); - jobConf.addResource(new Path(tmpJHFolder.toString() - + "/" + simulatedJobID + "_conf.xml")); - jobConf.reloadConfiguration(); - return jobConf; - - }finally { - fs.close(); - } - } - - /** - * Get the simulated job history of a job. - * @param simulatedJobID - simulated job id. - * @return - simulated job information. - * @throws IOException - if an I/O error occurs. - */ - public JobHistoryParser.JobInfo getSimulatedJobHistory(JobID simulatedJobID) - throws IOException { - FileSystem fs = null; - try { - String historyFilePath = jtClient.getProxy(). 
- getJobHistoryLocationForRetiredJob(simulatedJobID); - Path jhpath = new Path(historyFilePath); - fs = jhpath.getFileSystem(conf); - JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath); - JobHistoryParser.JobInfo jhInfo = jhparser.parse(); - return jhInfo; - - } finally { - fs.close(); - } - } - - /** - * It verifies the heap memory resource usage of gridmix jobs with - * corresponding original job in the trace. - * @param zombieJob - Original job history. - * @param jhInfo - Simulated job history. - * @param simuJobConf - simulated job configuration. - */ - public void verifyMemoryEmulationOfJobs(ZombieJob zombieJob, - JobHistoryParser.JobInfo jhInfo, - JobConf simuJobConf) throws Exception { - long origJobMapsTHU = 0; - long origJobReducesTHU = 0; - long simuJobMapsTHU = 0; - long simuJobReducesTHU = 0; - boolean isMemEmulOn = false; - if (simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON) != null) { - isMemEmulOn = - simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON). - contains(GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN); - } - - if (isMemEmulOn) { - for (int index = 0; index < zombieJob.getNumberMaps(); index ++) { - TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index); - if (mapTask.getResourceUsageMetrics().getHeapUsage() > 0) { - origJobMapsTHU += - mapTask.getResourceUsageMetrics().getHeapUsage(); - } - } - LOG.info("Original Job Maps Total Heap Usage: " + origJobMapsTHU); - - for (int index = 0; index < zombieJob.getNumberReduces(); index ++) { - TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index); - if (reduceTask.getResourceUsageMetrics().getHeapUsage() > 0) { - origJobReducesTHU += - reduceTask.getResourceUsageMetrics().getHeapUsage(); - } - } - LOG.info("Original Job Reduces Total Heap Usage: " + origJobReducesTHU); - - simuJobMapsTHU = - getCounterValue(jhInfo.getMapCounters(), - TaskCounter.COMMITTED_HEAP_BYTES.toString()); - LOG.info("Simulated Job Maps Total Heap Usage: " + simuJobMapsTHU); - - simuJobReducesTHU = - getCounterValue(jhInfo.getReduceCounters(), - TaskCounter.COMMITTED_HEAP_BYTES.toString()); - LOG.info("Simulated Jobs Reduces Total Heap Usage: " + simuJobReducesTHU); - - long mapCount = jhInfo.getTotalMaps(); - long reduceCount = jhInfo.getTotalReduces(); - - String strHeapRatio = - simuJobConf.get(GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO); - if (strHeapRatio == null) { - strHeapRatio = "0.3F"; - } - - if (mapCount > 0) { - double mapEmulFactor = (simuJobMapsTHU * 100) / origJobMapsTHU; - long mapEmulAccuracy = Math.round(mapEmulFactor); - LOG.info("Maps memory emulation accuracy of a job:" - + mapEmulAccuracy + "%"); - Assert.assertTrue("Map phase total memory emulation had crossed the " - + "configured max limit.", mapEmulAccuracy - <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT); - Assert.assertTrue("Map phase total memory emulation had not crossed " - + "the configured min limit.", mapEmulAccuracy - >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT); - double expHeapRatio = Double.parseDouble(strHeapRatio); - LOG.info("expHeapRatio for maps:" + expHeapRatio); - double actHeapRatio = - ((double)Math.abs(origJobMapsTHU - simuJobMapsTHU)) ; - actHeapRatio /= origJobMapsTHU; - LOG.info("actHeapRatio for maps:" + actHeapRatio); - Assert.assertTrue("Simulate job maps heap ratio not matched.", - actHeapRatio <= expHeapRatio); - } - - if (reduceCount >0) { - double reduceEmulFactor = (simuJobReducesTHU * 100) / origJobReducesTHU; - long reduceEmulAccuracy = Math.round(reduceEmulFactor); - 
LOG.info("Reduces memory emulation accuracy of a job:" - + reduceEmulAccuracy + "%"); - Assert.assertTrue("Reduce phase total memory emulation had crossed " - + "configured max limit.", reduceEmulAccuracy - <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT); - Assert.assertTrue("Reduce phase total memory emulation had not " - + "crosssed configured min limit.", reduceEmulAccuracy - >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT); - double expHeapRatio = Double.parseDouble(strHeapRatio); - LOG.info("expHeapRatio for reduces:" + expHeapRatio); - double actHeapRatio = - ((double)Math.abs(origJobReducesTHU - simuJobReducesTHU)); - actHeapRatio /= origJobReducesTHU; - LOG.info("actHeapRatio for reduces:" + actHeapRatio); - Assert.assertTrue("Simulate job reduces heap ratio not matched.", - actHeapRatio <= expHeapRatio); - } - } - } - - /** - * It verifies the cpu resource usage of a gridmix job against - * their original job. - * @param origJobHistory - Original job history. - * @param simuJobHistoryInfo - Simulated job history. - * @param simuJobConf - simulated job configuration. - */ - public void verifyCPUEmulationOfJobs(ZombieJob origJobHistory, - JobHistoryParser.JobInfo simuJobHistoryInfo, - JobConf simuJobConf) throws Exception { - - boolean isCpuEmulOn = false; - if (simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON) != null) { - isCpuEmulOn = - simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON). - contains(GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN); - } - - if (isCpuEmulOn) { - Map origJobMetrics = - getOriginalJobCPUMetrics(origJobHistory); - Map simuJobMetrics = - getSimulatedJobCPUMetrics(simuJobHistoryInfo); - - long origMapUsage = origJobMetrics.get("MAP"); - LOG.info("Maps cpu usage of original job:" + origMapUsage); - - long origReduceUsage = origJobMetrics.get("REDUCE"); - LOG.info("Reduces cpu usage of original job:" + origReduceUsage); - - long simuMapUsage = simuJobMetrics.get("MAP"); - LOG.info("Maps cpu usage of simulated job:" + simuMapUsage); - - long simuReduceUsage = simuJobMetrics.get("REDUCE"); - LOG.info("Reduces cpu usage of simulated job:"+ simuReduceUsage); - - long mapCount = simuJobHistoryInfo.getTotalMaps(); - long reduceCount = simuJobHistoryInfo.getTotalReduces(); - - if (mapCount > 0) { - double mapEmulFactor = (simuMapUsage * 100) / origMapUsage; - long mapEmulAccuracy = Math.round(mapEmulFactor); - LOG.info("CPU emulation accuracy for maps in job " + - simuJobHistoryInfo.getJobId() + - ":"+ mapEmulAccuracy + "%"); - Assert.assertTrue("Map-side cpu emulaiton inaccurate!" + - " Actual cpu usage: " + simuMapUsage + - " Expected cpu usage: " + origMapUsage, mapEmulAccuracy - >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT - && mapEmulAccuracy - <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT); - } - - if (reduceCount >0) { - double reduceEmulFactor = (simuReduceUsage * 100) / origReduceUsage; - long reduceEmulAccuracy = Math.round(reduceEmulFactor); - LOG.info("CPU emulation accuracy for reduces in job " + - simuJobHistoryInfo.getJobId() + - ": " + reduceEmulAccuracy + "%"); - Assert.assertTrue("Reduce side cpu emulaiton inaccurate!" + - " Actual cpu usage:" + simuReduceUsage + - "Expected cpu usage: " + origReduceUsage, - reduceEmulAccuracy - >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT - && reduceEmulAccuracy - <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT); - } - } - } - - /** - * Get the simulated job cpu metrics. - * @param jhInfo - Simulated job history - * @return - cpu metrics as a map. 
- * @throws Exception - if an error occurs. - */ - private Map getSimulatedJobCPUMetrics( - JobHistoryParser.JobInfo jhInfo) throws Exception { - Map resourceMetrics = new HashMap(); - long mapCPUUsage = - getCounterValue(jhInfo.getMapCounters(), - TaskCounter.CPU_MILLISECONDS.toString()); - resourceMetrics.put("MAP", mapCPUUsage); - long reduceCPUUsage = - getCounterValue(jhInfo.getReduceCounters(), - TaskCounter.CPU_MILLISECONDS.toString()); - resourceMetrics.put("REDUCE", reduceCPUUsage); - return resourceMetrics; - } - - /** - * Get the original job cpu metrics. - * @param zombieJob - original job history. - * @return - cpu metrics as map. - */ - private Map getOriginalJobCPUMetrics(ZombieJob zombieJob) { - long mapTotalCPUUsage = 0; - long reduceTotalCPUUsage = 0; - Map resourceMetrics = new HashMap(); - - for (int index = 0; index < zombieJob.getNumberMaps(); index ++) { - TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index); - if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) { - mapTotalCPUUsage += - mapTask.getResourceUsageMetrics().getCumulativeCpuUsage(); - } - } - resourceMetrics.put("MAP", mapTotalCPUUsage); - - for (int index = 0; index < zombieJob.getNumberReduces(); index ++) { - TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index); - if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) { - reduceTotalCPUUsage += - reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage(); - } - } - resourceMetrics.put("REDUCE", reduceTotalCPUUsage); - return resourceMetrics; - } - - /** - * Get the user resolver of a job. - */ - public String getJobUserResolver() { - return userResolverVal; - } - - /** - * It verifies the compression ratios of mapreduce jobs. - * @param origJobConf - original job configuration. - * @param simuJobConf - simulated job configuration. - * @param counters - simulated job counters. - * @param origReduceCounters - original job reduce counters. - * @param origMapCounters - original job map counters. - * @throws ParseException - if a parser error occurs. - * @throws IOException - if an I/O error occurs. - */ - public void verifyCompressionEmulation(JobConf origJobConf, - JobConf simuJobConf,Counters counters, - Map origReduceCounters, - Map origMapJobCounters) - throws ParseException,IOException { - if (simuJobConf.getBoolean(compEmulKey, false)) { - String inputDir = origJobConf.get(fileInputFormatKey); - Assert.assertNotNull(fileInputFormatKey + " is Null",inputDir); - long simMapInputBytes = getCounterValue(counters, "HDFS_BYTES_READ"); - long uncompressedInputSize = origMapJobCounters.get("MAP_INPUT_BYTES"); - long simReduceInputBytes = - getCounterValue(counters, "REDUCE_SHUFFLE_BYTES"); - long simMapOutputBytes = getCounterValue(counters, "MAP_OUTPUT_BYTES"); - - // Verify input compression whether it's enable or not. - if (inputDir.contains(".gz") || inputDir.contains(".tgz") - || inputDir.contains(".bz")) { - Assert.assertTrue("Input decompression attribute has been not set for " - + "for compressed input", - simuJobConf.getBoolean(inputDecompKey, false)); - - float INPUT_COMP_RATIO = - getExpectedCompressionRatio(simuJobConf, mapInputCompRatio); - float INTERMEDIATE_COMP_RATIO = - getExpectedCompressionRatio(simuJobConf, mapOutputCompRatio); - - // Verify Map Input Compression Ratio. - assertMapInputCompressionRatio(simMapInputBytes, uncompressedInputSize, - INPUT_COMP_RATIO); - - // Verify Map Output Compression Ratio. 
- assertMapOuputCompressionRatio(simReduceInputBytes, simMapOutputBytes, - INTERMEDIATE_COMP_RATIO); - } else { - Assert.assertEquals("MAP input bytes has not matched.", - convertBytes(uncompressedInputSize), - convertBytes(simMapInputBytes)); - } - - Assert.assertEquals("Simulated job output format has not matched with " - + "original job output format.", - origJobConf.getBoolean(fileOutputFormatKey,false), - simuJobConf.getBoolean(fileOutputFormatKey,false)); - - if (simuJobConf.getBoolean(fileOutputFormatKey,false)) { - float OUTPUT_COMP_RATIO = - getExpectedCompressionRatio(simuJobConf, reduceOutputCompRatio); - - //Verify reduce output compression ratio. - long simReduceOutputBytes = - getCounterValue(counters, "HDFS_BYTES_WRITTEN"); - long origReduceOutputBytes = - origReduceCounters.get("REDUCE_OUTPUT_BYTES"); - assertReduceOutputCompressionRatio(simReduceOutputBytes, - origReduceOutputBytes, - OUTPUT_COMP_RATIO); - } - } - } - - private void assertMapInputCompressionRatio(long simMapInputBytes, - long origMapInputBytes, - float expInputCompRatio) { - LOG.info("***Verify the map input bytes compression ratio****"); - LOG.info("Simulated job's map input bytes(REDUCE_SHUFFLE_BYTES): " - + simMapInputBytes); - LOG.info("Original job's map input bytes: " + origMapInputBytes); - - final float actInputCompRatio = - getActualCompressionRatio(simMapInputBytes, origMapInputBytes); - LOG.info("Expected Map Input Compression Ratio:" + expInputCompRatio); - LOG.info("Actual Map Input Compression Ratio:" + actInputCompRatio); - - float diffVal = (float)(expInputCompRatio * 0.06); - LOG.info("Expected Difference of Map Input Compression Ratio is <= " + - + diffVal); - float delta = Math.abs(expInputCompRatio - actInputCompRatio); - LOG.info("Actual Difference of Map Iput Compression Ratio:" + delta); - Assert.assertTrue("Simulated job input compression ratio has mismatched.", - delta <= diffVal); - LOG.info("******Done******"); - } - - private void assertMapOuputCompressionRatio(long simReduceInputBytes, - long simMapoutputBytes, - float expMapOuputCompRatio) { - LOG.info("***Verify the map output bytes compression ratio***"); - LOG.info("Simulated job reduce input bytes:" + simReduceInputBytes); - LOG.info("Simulated job map output bytes:" + simMapoutputBytes); - - final float actMapOutputCompRatio = - getActualCompressionRatio(simReduceInputBytes, simMapoutputBytes); - LOG.info("Expected Map Output Compression Ratio:" + expMapOuputCompRatio); - LOG.info("Actual Map Output Compression Ratio:" + actMapOutputCompRatio); - - float diffVal = 0.05f; - LOG.info("Expected Difference Of Map Output Compression Ratio is <= " - + diffVal); - float delta = Math.abs(expMapOuputCompRatio - actMapOutputCompRatio); - LOG.info("Actual Difference Of Map Ouput Compression Ratio :" + delta); - - Assert.assertTrue("Simulated job map output compression ratio " - + "has not been matched.", delta <= diffVal); - LOG.info("******Done******"); - } - - private void assertReduceOutputCompressionRatio(long simReduceOutputBytes, - long origReduceOutputBytes , float expOutputCompRatio ) { - LOG.info("***Verify the reduce output bytes compression ratio***"); - final float actOuputputCompRatio = - getActualCompressionRatio(simReduceOutputBytes, origReduceOutputBytes); - LOG.info("Simulated job's reduce output bytes:" + simReduceOutputBytes); - LOG.info("Original job's reduce output bytes:" + origReduceOutputBytes); - LOG.info("Expected output compression ratio:" + expOutputCompRatio); - LOG.info("Actual output compression 
ratio:" + actOuputputCompRatio); - long diffVal = (long)(origReduceOutputBytes * 0.15); - long delta = Math.abs(origReduceOutputBytes - simReduceOutputBytes); - LOG.info("Expected difference of output compressed bytes is <= " - + diffVal); - LOG.info("Actual difference of compressed ouput bytes:" + delta); - Assert.assertTrue("Simulated job reduce output compression ratio " + - "has not been matched.", delta <= diffVal); - LOG.info("******Done******"); - } - - private float getExpectedCompressionRatio(JobConf simuJobConf, - String RATIO_TYPE) { - // Default decompression ratio is 0.50f irrespective of original - //job compression ratio. - if (simuJobConf.get(RATIO_TYPE) != null) { - return Float.parseFloat(simuJobConf.get(RATIO_TYPE)); - } else { - return 0.50f; - } - } - - private float getActualCompressionRatio(long compressBytes, - long uncompessBytes) { - double ratio = ((double)compressBytes) / uncompessBytes; - int significant = (int)Math.round(ratio * 100); - return ((float)significant)/100; - } - - /** - * Verify the distributed cache files between the jobs in a gridmix run. - * @param jobsInfo - jobConfs of simulated and original jobs as a map. - */ - public void verifyDistributedCacheBetweenJobs( - Map> jobsInfo) { - if (jobsInfo.size() > 1) { - Map simJobfilesOccurBtnJobs = - getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 0); - Map origJobfilesOccurBtnJobs = - getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 1); - List simuOccurList = - getMapValuesAsList(simJobfilesOccurBtnJobs); - Collections.sort(simuOccurList); - List origOccurList = - getMapValuesAsList(origJobfilesOccurBtnJobs); - Collections.sort(origOccurList); - Assert.assertEquals("The unique count of distibuted cache files in " - + "simulated jobs have not matched with the unique " - + "count of original jobs distributed files ", - simuOccurList.size(), origOccurList.size()); - int index = 0; - for (Integer origDistFileCount : origOccurList) { - Assert.assertEquals("Distributed cache file reused in simulated " - + "jobs has not matched with reused of distributed" - + "cache file in original jobs.", - origDistFileCount, simuOccurList.get(index)); - index ++; - } - } - } - - /** - * Get the unique distributed cache files and occurrence between the jobs. - * @param jobsInfo - job's configurations as a map. - * @param jobConfIndex - 0 for simulated job configuration and - * 1 for original jobs configuration. - * @return - unique distributed cache files and occurrences as map. 
- */ - private Map getDistcacheFilesOccurenceBetweenJobs( - Map> jobsInfo, int jobConfIndex) { - Map filesOccurBtnJobs = new HashMap (); - Set jobIds = jobsInfo.keySet(); - Iterator ite = jobIds.iterator(); - while (ite.hasNext()) { - String jobId = ite.next(); - List jobconfs = jobsInfo.get(jobId); - String [] distCacheFiles = jobconfs.get(jobConfIndex).get( - GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(","); - String [] distCacheFileTimeStamps = jobconfs.get(jobConfIndex).get( - GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(","); - String [] distCacheFileVisib = jobconfs.get(jobConfIndex).get( - GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(","); - int indx = 0; - for (String distCacheFile : distCacheFiles) { - String fileAndSize = distCacheFile + "^" - + distCacheFileTimeStamps[indx] + "^" - + jobconfs.get(jobConfIndex).getUser(); - if (filesOccurBtnJobs.get(fileAndSize) != null) { - int count = filesOccurBtnJobs.get(fileAndSize); - count ++; - filesOccurBtnJobs.put(fileAndSize, count); - } else { - filesOccurBtnJobs.put(fileAndSize, 1); - } - } - } - return filesOccurBtnJobs; - } - - /** - * It verifies the distributed cache emulation of a job. - * @param zombieJob - Original job story. - * @param simuJobConf - Simulated job configuration. - */ - public void verifyDistributeCache(ZombieJob zombieJob, - JobConf simuJobConf) throws IOException { - if (simuJobConf.getBoolean(GridMixConfig.GRIDMIX_DISTCACHE_ENABLE, false)) { - JobConf origJobConf = zombieJob.getJobConf(); - assertFileVisibility(simuJobConf); - assertDistcacheFiles(simuJobConf,origJobConf); - assertFileSizes(simuJobConf,origJobConf); - assertFileStamps(simuJobConf,origJobConf); - } else { - Assert.assertNull("Configuration has distributed cache visibilites" - + "without enabled distributed cache emulation.", - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES)); - Assert.assertNull("Configuration has distributed cache files time " - + "stamps without enabled distributed cache emulation.", - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP)); - Assert.assertNull("Configuration has distributed cache files paths" - + "without enabled distributed cache emulation.", - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES)); - Assert.assertNull("Configuration has distributed cache files sizes" - + "without enabled distributed cache emulation.", - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE)); - } - } - - private void assertFileStamps(JobConf simuJobConf, JobConf origJobConf) { - //Verify simulated jobs against distributed cache files time stamps. - String [] origDCFTS = - origJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(","); - String [] simuDCFTS = - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(","); - for (int index = 0; index < origDCFTS.length; index++) { - Assert.assertTrue("Invalid time stamps between original " - +"and simulated job", Long.parseLong(origDCFTS[index]) - < Long.parseLong(simuDCFTS[index])); - } - } - - private void assertFileVisibility(JobConf simuJobConf ) { - // Verify simulated jobs against distributed cache files visibilities. 
- String [] distFiles = - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(","); - String [] simuDistVisibilities = - simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(","); - List expFileVisibility = new ArrayList(); - int index = 0; - for (String distFile : distFiles) { - boolean isLocalDistCache = GridmixSystemTestCase.isLocalDistCache( - distFile, - simuJobConf.getUser(), - Boolean.valueOf(simuDistVisibilities[index])); - if (!isLocalDistCache) { - expFileVisibility.add(true); - } else { - expFileVisibility.add(false); - } - index ++; - } - index = 0; - for (String actFileVisibility : simuDistVisibilities) { - Assert.assertEquals("Simulated job distributed cache file " - + "visibilities have not matched.", - expFileVisibility.get(index), - Boolean.valueOf(actFileVisibility)); - index ++; - } - } - - private void assertDistcacheFiles(JobConf simuJobConf, JobConf origJobConf) - throws IOException { - //Verify simulated jobs against distributed cache files. - String [] origDistFiles = origJobConf.get( - GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(","); - String [] simuDistFiles = simuJobConf.get( - GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(","); - String [] simuDistVisibilities = simuJobConf.get( - GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(","); - Assert.assertEquals("No. of simulated job's distcache files mismatched " - + "with no. of original job's distcache files", - origDistFiles.length, simuDistFiles.length); - - int index = 0; - for (String simDistFile : simuDistFiles) { - Path distPath = new Path(simDistFile); - boolean isLocalDistCache = - GridmixSystemTestCase.isLocalDistCache(simDistFile, - simuJobConf.getUser(), - Boolean.valueOf(simuDistVisibilities[index])); - if (!isLocalDistCache) { - FileSystem fs = distPath.getFileSystem(conf); - FileStatus fstat = fs.getFileStatus(distPath); - FsPermission permission = fstat.getPermission(); - Assert.assertTrue("HDFS distributed cache file has wrong " - + "permissions for users.", - FsAction.READ_WRITE.SYMBOL - == permission.getUserAction().SYMBOL); - Assert.assertTrue("HDFS distributed cache file has wrong " - + "permissions for groups.", - FsAction.READ.SYMBOL - == permission.getGroupAction().SYMBOL); - Assert.assertTrue("HDFS distributed cache file has wrong " - + "permissions for others.", - FsAction.READ.SYMBOL - == permission.getOtherAction().SYMBOL); - } - index++; - } - } - - private void assertFileSizes(JobConf simuJobConf, JobConf origJobConf) { - // Verify simulated jobs against distributed cache files size. 
- List origDistFilesSize = - Arrays.asList(origJobConf.get( - GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(",")); - Collections.sort(origDistFilesSize); - - List simuDistFilesSize = - Arrays.asList(simuJobConf.get( - GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(",")); - Collections.sort(simuDistFilesSize); - - Assert.assertEquals("Simulated job's file size list has not " - + "matched with the Original job's file size list.", - origDistFilesSize.size(), - simuDistFilesSize.size()); - - for (int index = 0; index < origDistFilesSize.size(); index ++) { - Assert.assertEquals("Simulated job distcache file size has not " - + "matched with original job distcache file size.", - origDistFilesSize.get(index), - simuDistFilesSize.get(index)); - } - } - - private void setJobDistributedCacheInfo(String jobId, JobConf simuJobConf, - JobConf origJobConf) { - if (simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES) != null) { - List jobConfs = new ArrayList(); - jobConfs.add(simuJobConf); - jobConfs.add(origJobConf); - simuAndOrigJobsInfo.put(jobId,jobConfs); - } - } - - private List getMapValuesAsList(Map jobOccurs) { - List occursList = new ArrayList(); - Set files = jobOccurs.keySet(); - Iterator ite = files.iterator(); - while (ite.hasNext()) { - String file = ite.next(); - occursList.add(jobOccurs.get(file)); - } - return occursList; - } - - /** - * It verifies the high ram gridmix jobs. - * @param zombieJob - Original job story. - * @param simuJobConf - Simulated job configuration. - */ - @SuppressWarnings("deprecation") - public void verifyHighRamMemoryJobs(ZombieJob zombieJob, - JobConf simuJobConf) { - JobConf origJobConf = zombieJob.getJobConf(); - int origMapFactor = getMapFactor(origJobConf); - int origReduceFactor = getReduceFactor(origJobConf); - boolean isHighRamEnable = - simuJobConf.getBoolean(GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE, - false); - if (isHighRamEnable) { - if (origMapFactor >= 2 && origReduceFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 1); - } else if(origMapFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 2); - } else if(origReduceFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 3); - } - } else { - if (origMapFactor >= 2 && origReduceFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 4); - } else if(origMapFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 5); - } else if(origReduceFactor >= 2) { - assertGridMixHighRamJob(simuJobConf, origJobConf, 6); - } - } - } - - /** - * Get the value for identifying the slots used by the map. - * @param jobConf - job configuration - * @return - map factor value. - */ - public static int getMapFactor(Configuration jobConf) { - long clusterMapMem = - Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_MAP_MEMORY)); - long jobMapMem = - Long.parseLong(jobConf.get(GridMixConfig.JOB_MAP_MEMORY_MB)); - return (int)Math.ceil((double)jobMapMem / clusterMapMem); - } - - /** - * Get the value for identifying the slots used by the reduce. - * @param jobConf - job configuration. - * @return - reduce factor value. 
- */ - public static int getReduceFactor(Configuration jobConf) { - long clusterReduceMem = - Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_REDUCE_MEMORY)); - long jobReduceMem = - Long.parseLong(jobConf.get(GridMixConfig.JOB_REDUCE_MEMORY_MB)); - return (int)Math.ceil((double)jobReduceMem / clusterReduceMem); - } - - @SuppressWarnings("deprecation") - private void assertGridMixHighRamJob(JobConf simuJobConf, - Configuration origConf, int option) { - int simuMapFactor = getMapFactor(simuJobConf); - int simuReduceFactor = getReduceFactor(simuJobConf); - /** - * option 1 : Both map and reduce honor the high ram. - * option 2 : Map only honors the high ram. - * option 3 : Reduce only honors the high ram. - * option 4 : Both map and reduce should not honor the high ram - * in the disabled state. - * option 5 : Map should not honor the high ram in the disabled state. - * option 6 : Reduce should not honor the high ram in the disabled state. - */ - switch (option) { - case 1 : - Assert.assertTrue("Gridmix job has not honored the high " - + "ram for map.", simuMapFactor >= 2 - && simuMapFactor == getMapFactor(origConf)); - Assert.assertTrue("Gridmix job has not honored the high " - + "ram for reduce.", simuReduceFactor >= 2 - && simuReduceFactor - == getReduceFactor(origConf)); - break; - case 2 : - Assert.assertTrue("Gridmix job has not honored the high " - + "ram for map.", simuMapFactor >= 2 - && simuMapFactor == getMapFactor(origConf)); - break; - case 3 : - Assert.assertTrue("Gridmix job has not honored the high " - + "ram for reduce.", simuReduceFactor >= 2 - && simuReduceFactor - == getReduceFactor(origConf)); - break; - case 4 : - Assert.assertTrue("Gridmix job has honored the high " - + "ram for map in emulation disable state.", - simuMapFactor < 2 - && simuMapFactor != getMapFactor(origConf)); - Assert.assertTrue("Gridmix job has honored the high " - + "ram for reduce in emulation disable state.", - simuReduceFactor < 2 - && simuReduceFactor - != getReduceFactor(origConf)); - break; - case 5 : - Assert.assertTrue("Gridmix job has honored the high " - + "ram for map in emulation disable state.", - simuMapFactor < 2 - && simuMapFactor != getMapFactor(origConf)); - break; - case 6 : - Assert.assertTrue("Gridmix job has honored the high " - + "ram for reduce in emulation disable state.", - simuReduceFactor < 2 - && simuReduceFactor - != getReduceFactor(origConf)); - break; - } - } - - /** - * Get task memory after scaling based on cluster configuration. - * @param jobTaskKey - Job task key attribute. - * @param clusterTaskKey - Cluster task key attribute. - * @param origConf - Original job configuration. - * @param simuConf - Simulated job configuration. - * @return scaled task memory value. - */ - @SuppressWarnings("deprecation") - public static long getScaledTaskMemInMB(String jobTaskKey, - String clusterTaskKey, - Configuration origConf, - Configuration simuConf) { - long simuClusterTaskValue = - simuConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT); - long origClusterTaskValue = - origConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT); - long origJobTaskValue = - origConf.getLong(jobTaskKey, JobConf.DISABLED_MEMORY_LIMIT); - double scaleFactor = - Math.ceil((double)origJobTaskValue / origClusterTaskValue); - long simulatedJobValue = (long)(scaleFactor * simuClusterTaskValue); - return simulatedJobValue; - } - - /** - * It verifies the memory limit of a task. - * @param TaskMemInMB - task memory limit. - * @param taskLimitInMB - task upper limit. 
- */ - public static void verifyMemoryLimits(long TaskMemInMB, long taskLimitInMB) { - if (TaskMemInMB > taskLimitInMB) { - Assert.fail("Simulated job's task memory exceeds the " - + "upper limit of task virtual memory."); - } - } - - private String convertJobStatus(String jobStatus) { - if (jobStatus.equals("SUCCEEDED")) { - return "SUCCESS"; - } else { - return jobStatus; - } - } - - private String convertBytes(long bytesValue) { - int units = 1024; - if( bytesValue < units ) { - return String.valueOf(bytesValue)+ "B"; - } else { - // it converts the bytes into either KB or MB or GB or TB etc. - int exp = (int)(Math.log(bytesValue) / Math.log(units)); - return String.format("%1d%sB",(long)(bytesValue / Math.pow(units, exp)), - "KMGTPE".charAt(exp -1)); - } - } - - - private long getCounterValue(Counters counters, String key) - throws ParseException { - for (String groupName : counters.getGroupNames()) { - CounterGroup totalGroup = counters.getGroup(groupName); - Iterator itrCounter = totalGroup.iterator(); - while (itrCounter.hasNext()) { - Counter counter = itrCounter.next(); - if (counter.getName().equals(key)) { - return counter.getValue(); - } - } - } - return 0; - } -} - diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java deleted file mode 100644 index 723adbc23c3..00000000000 --- a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java +++ /dev/null @@ -1,513 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred.gridmix.test.system; - -import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.mapred.gridmix.Gridmix; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapreduce.JobID; -import java.util.Date; -import java.util.HashMap; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Arrays; -import java.net.URI; -import java.text.SimpleDateFormat; -import java.io.OutputStream; -import java.util.Set; -import java.util.List; -import java.util.Iterator; -import java.util.Map; -import java.io.File; -import java.io.FileOutputStream; -import org.apache.hadoop.test.system.ProxyUserDefinitions; -import org.apache.hadoop.test.system.ProxyUserDefinitions.GroupsAndHost; - -/** - * Gridmix utilities. - */ -public class UtilsForGridmix { - private static final Log LOG = LogFactory.getLog(UtilsForGridmix.class); - private static final Path DEFAULT_TRACES_PATH = - new Path(System.getProperty("user.dir") + "/src/test/system/resources/"); - - /** - * cleanup the folder or file. - * @param path - folder or file path. - * @param conf - cluster configuration - * @throws IOException - If an I/O error occurs. - */ - public static void cleanup(Path path, Configuration conf) - throws IOException { - FileSystem fs = path.getFileSystem(conf); - fs.delete(path, true); - fs.close(); - } - - /** - * Get the login user. - * @return - login user as string.. - * @throws IOException - if an I/O error occurs. - */ - public static String getUserName() throws IOException { - return UserGroupInformation.getLoginUser().getUserName(); - } - - /** - * Get the argument list for gridmix job. - * @param gridmixDir - gridmix parent directory. - * @param gridmixRunMode - gridmix modes either 1,2,3. - * @param values - gridmix runtime values. - * @param otherArgs - gridmix other generic args. - * @return - argument list as string array. 
- */ - public static String [] getArgsList(Path gridmixDir, int gridmixRunMode, - String [] values, String [] otherArgs) { - String [] runtimeArgs = { - "-D", GridMixConfig.GRIDMIX_LOG_MODE + "=DEBUG", - "-D", GridMixConfig.GRIDMIX_OUTPUT_DIR + "=gridmix", - "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true", - "-D", GridMixConfig.GRIDMIX_JOB_TYPE + "=" + values[0], - "-D", GridMixConfig.GRIDMIX_USER_RESOLVER + "=" + values[1], - "-D", GridMixConfig.GRIDMIX_SUBMISSION_POLICY + "=" + values[2] - }; - - String [] classArgs; - if ((gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() - || gridmixRunMode - == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) - && values[1].indexOf("RoundRobinUserResolver") > 0) { - classArgs = new String[] { - "-generate", values[3], - "-users", values[4], - gridmixDir.toString(), - values[5] - }; - } else if (gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() - || gridmixRunMode - == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) { - classArgs = new String[] { - "-generate", values[3], - gridmixDir.toString(), - values[4] - }; - } else if (gridmixRunMode == GridMixRunMode.RUN_GRIDMIX.getValue() - && values[1].indexOf("RoundRobinUserResolver") > 0) { - classArgs = new String[] { - "-users", values[3], - gridmixDir.toString(), - values[4] - }; - } else { - classArgs = new String[] { - gridmixDir.toString(),values[3] - }; - } - - String [] args = new String [runtimeArgs.length + - classArgs.length + ((otherArgs != null)?otherArgs.length:0)]; - System.arraycopy(runtimeArgs, 0, args, 0, runtimeArgs.length); - - if (otherArgs != null) { - System.arraycopy(otherArgs, 0, args, runtimeArgs.length, - otherArgs.length); - System.arraycopy(classArgs, 0, args, (runtimeArgs.length + - otherArgs.length), classArgs.length); - } else { - System.arraycopy(classArgs, 0, args, runtimeArgs.length, - classArgs.length); - } - return args; - } - - /** - * Create a file with specified size in mb. - * @param sizeInMB - file size in mb. - * @param inputDir - input directory. - * @param conf - cluster configuration. - * @throws Exception - if an exception occurs. - */ - public static void createFile(int sizeInMB, Path inputDir, - Configuration conf) throws Exception { - Date d = new Date(); - SimpleDateFormat sdf = new SimpleDateFormat("ddMMyy_HHmmssS"); - String formatDate = sdf.format(d); - FileSystem fs = inputDir.getFileSystem(conf); - OutputStream out = fs.create(new Path(inputDir,"datafile_" + formatDate)); - final byte[] b = new byte[1024 * 1024]; - for (int index = 0; index < sizeInMB; index++) { - out.write(b); - } - out.close(); - fs.close(); - } - - /** - * Create directories for a path. - * @param path - directories path. - * @param conf - cluster configuration. - * @throws IOException - if an I/O error occurs. - */ - public static void createDirs(Path path,Configuration conf) - throws IOException { - FileSystem fs = path.getFileSystem(conf); - if (!fs.exists(path)) { - fs.mkdirs(path); - } - } - - /** - * Run the Gridmix job with given runtime arguments. - * @param gridmixDir - Gridmix parent directory. - * @param conf - cluster configuration. - * @param gridmixRunMode - gridmix run mode either 1,2,3 - * @param runtimeValues -gridmix runtime values. - * @return - gridmix status either 0 or 1. 
- * @throws Exception - */ - public static int runGridmixJob(Path gridmixDir, Configuration conf, - int gridmixRunMode, String [] runtimeValues) throws Exception { - return runGridmixJob(gridmixDir, conf, gridmixRunMode, runtimeValues, null); - } - /** - * Run the Gridmix job with given runtime arguments. - * @param gridmixDir - Gridmix parent directory - * @param conf - cluster configuration. - * @param gridmixRunMode - gridmix run mode. - * @param runtimeValues - gridmix runtime values. - * @param otherArgs - gridmix other generic args. - * @return - gridmix status either 0 or 1. - * @throws Exception - */ - - public static int runGridmixJob(Path gridmixDir, Configuration conf, - int gridmixRunMode, String [] runtimeValues, - String [] otherArgs) throws Exception { - Path outputDir = new Path(gridmixDir, "gridmix"); - Path inputDir = new Path(gridmixDir, "input"); - LOG.info("Cleanup the data if data already exists."); - String modeName = ""; - switch (gridmixRunMode) { - case 1 : - cleanup(inputDir, conf); - cleanup(outputDir, conf); - modeName = GridMixRunMode.DATA_GENERATION.name(); - break; - case 2 : - cleanup(outputDir, conf); - modeName = GridMixRunMode.RUN_GRIDMIX.name(); - break; - case 3 : - cleanup(inputDir, conf); - cleanup(outputDir, conf); - modeName = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.name(); - break; - } - - final String [] args = - UtilsForGridmix.getArgsList(gridmixDir, gridmixRunMode, - runtimeValues, otherArgs); - Gridmix gridmix = new Gridmix(); - LOG.info("Submit a Gridmix job in " + runtimeValues[1] - + " mode for " + modeName); - int exitCode = ToolRunner.run(conf, gridmix, args); - return exitCode; - } - - /** - * Get the proxy users file. - * @param conf - cluster configuration. - * @return String - proxy users file. - * @throws Exception - if no proxy users are found in the configuration. - */ - public static String getProxyUsersFile(Configuration conf) - throws Exception { - ProxyUserDefinitions pud = getProxyUsersData(conf); - String fileName = buildProxyUsersFile(pud.getProxyUsers()); - if (fileName == null) { - LOG.error("Proxy users file not found."); - throw new Exception("Proxy users file not found."); - } else { - return fileName; - } - } - - /** - * List the current gridmix job IDs. - * @param client - job client. - * @param execJobCount - number of executed jobs. - * @return - list of gridmix job IDs. - * @throws IOException - if an I/O error occurs. - */ - public static List<JobID> listGridmixJobIDs(JobClient client, - int execJobCount) throws IOException { - List<JobID> jobids = new ArrayList<JobID>(); - JobStatus [] jobStatus = client.getAllJobs(); - int numJobs = jobStatus.length; - for (int index = 1; index <= execJobCount; index++) { - JobStatus js = jobStatus[numJobs - index]; - JobID jobid = js.getJobID(); - String jobName = js.getJobName(); - if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") && - !jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) { - jobids.add(jobid); - } - } - return (jobids.size() == 0)? null : jobids; - } - - /** - * List the proxy users.
- * @param conf - cluster configuration. - * @param loginUser - login user name. - * @return - list of proxy users other than the login user. - * @throws Exception - if no proxy users are found in the configuration. - */ - public static List<String> listProxyUsers(Configuration conf, - String loginUser) throws Exception { - List<String> proxyUsers = new ArrayList<String>(); - ProxyUserDefinitions pud = getProxyUsersData(conf); - Map<String, GroupsAndHost> usersData = pud.getProxyUsers(); - Collection<String> users = usersData.keySet(); - Iterator<String> itr = users.iterator(); - while (itr.hasNext()) { - String user = itr.next(); - if (!user.equals(loginUser)) { proxyUsers.add(user); } - } - return proxyUsers; - } - - private static String buildProxyUsersFile(final Map<String, GroupsAndHost> - proxyUserData) throws Exception { - FileOutputStream fos = null; - File file = null; - StringBuffer input = new StringBuffer(); - Set<String> users = proxyUserData.keySet(); - Iterator<String> itr = users.iterator(); - while (itr.hasNext()) { - String user = itr.next().toString(); - if (!user.equals( - UserGroupInformation.getLoginUser().getShortUserName())) { - input.append(user); - final GroupsAndHost gah = proxyUserData.get(user); - final List<String> groups = gah.getGroups(); - for (String group : groups) { - input.append(","); - input.append(group); - } - input.append("\n"); - } - } - if (input.length() > 0) { - try { - file = File.createTempFile("proxyusers", null); - fos = new FileOutputStream(file); - fos.write(input.toString().getBytes()); - } catch(IOException ioexp) { - LOG.warn(ioexp.getMessage()); - return null; - } finally { - if (fos != null) { - fos.close(); - } - if (file != null) { - file.deleteOnExit(); - } - } - LOG.info("file.toString():" + file.toString()); - return file.toString(); - } else { - return null; - } - } - - private static ProxyUserDefinitions getProxyUsersData(Configuration conf) - throws Exception { - Iterator<Map.Entry<String, String>> itr = conf.iterator(); - List<String> proxyUsersData = new ArrayList<String>(); - while (itr.hasNext()) { - String property = itr.next().toString(); - if (property.indexOf("hadoop.proxyuser") >= 0 - && property.indexOf("groups=") >= 0) { - proxyUsersData.add(property.split("\\.")[2]); - } - } - - if (proxyUsersData.size() == 0) { - LOG.error("No proxy users found in the configuration."); - throw new Exception("No proxy users found in the configuration."); - } - - ProxyUserDefinitions pud = new ProxyUserDefinitions() { - public boolean writeToFile(URI filePath) throws IOException { - throw new UnsupportedOperationException("No such method exists."); - }; - }; - - for (String userName : proxyUsersData) { - List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." + - userName + ".groups").split(",")); - List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." + - userName + ".hosts").split(",")); - ProxyUserDefinitions.GroupsAndHost definitions = - pud.new GroupsAndHost(); - definitions.setGroups(groups); - definitions.setHosts(hosts); - pud.addProxyUser(userName, definitions); - } - return pud; - } - - /** - * Gives the list of paths for MR traces against different time - * intervals. It fetches only the paths that follow the file naming - * convention below. - * Syntax : <FileName>_<TimeInterval>.json.gz - * The time interval embedded in the file name must follow the - * convention below. - * Syntax: <numeric>[m|h|d] - * e.g : a 10 minute trace should specify 10m, - * a 1 hour trace should specify 1h, - * and a 1 day trace should specify 1d. - * - * @param conf - cluster configuration. - * @return - list of MR trace paths as key/value pairs keyed by time interval. - * @throws IOException - if an I/O error occurs.
- */ - public static Map<String, String> getMRTraces(Configuration conf) - throws IOException { - return getMRTraces(conf, DEFAULT_TRACES_PATH); - } - - /** - * It gives the list of paths for MR traces against different time - * intervals. It fetches only the paths that follow the file naming - * convention below. - * Syntax : <FileName>_<TimeInterval>.json.gz - * The time interval embedded in the file name must follow the - * convention below. - * Syntax: <numeric>[m|h|d] - * e.g : a 10 minute trace should specify 10m, - * a 1 hour trace should specify 1h, - * and a 1 day trace should specify 1d. - * - * @param conf - cluster configuration object. - * @param tracesPath - MR traces path. - * @return - list of MR trace paths as key/value pairs keyed by time interval. - * @throws IOException - If an I/O error occurs. - */ - public static Map<String, String> getMRTraces(Configuration conf, - Path tracesPath) throws IOException { - Map<String, String> jobTraces = new HashMap<String, String>(); - final FileSystem fs = FileSystem.getLocal(conf); - final FileStatus fstat[] = fs.listStatus(tracesPath); - for (FileStatus fst : fstat) { - final String fileName = fst.getPath().getName(); - if (fileName.endsWith("m.json.gz") - || fileName.endsWith("h.json.gz") - || fileName.endsWith("d.json.gz")) { - jobTraces.put(fileName.substring(fileName.indexOf("_") + 1, - fileName.indexOf(".json.gz")), fst.getPath().toString()); - } - } - if (jobTraces.size() == 0) { - LOG.error("No traces found in " + tracesPath.toString() + " path."); - throw new IOException("No traces found in " - + tracesPath.toString() + " path."); - } - return jobTraces; - } - - /** - * It lists all the MR trace paths irrespective of time interval. - * @param conf - cluster configuration. - * @param tracesPath - MR traces path. - * @return - MR trace paths as a list. - * @throws IOException - if an I/O error occurs. - */ - public static List<String> listMRTraces(Configuration conf, - Path tracesPath) throws IOException { - List<String> jobTraces = new ArrayList<String>(); - final FileSystem fs = FileSystem.getLocal(conf); - final FileStatus fstat[] = fs.listStatus(tracesPath); - for (FileStatus fst : fstat) { - jobTraces.add(fst.getPath().toString()); - } - if (jobTraces.size() == 0) { - LOG.error("No traces found in " + tracesPath.toString() + " path."); - throw new IOException("No traces found in " - + tracesPath.toString() + " path."); - } - return jobTraces; - } - - /** - * It lists all the MR trace paths in the default traces location, - * irrespective of time interval. - * @param conf - cluster configuration. - * @return - MR trace paths as a list. - * @throws IOException - if an I/O error occurs. - */ - public static List<String> listMRTraces(Configuration conf) - throws IOException { - return listMRTraces(conf, DEFAULT_TRACES_PATH); - } - - /** - * Gives the list of MR traces for a given time interval. - * The time interval should follow the convention below. - * Syntax : <numeric>[m|h|d] - * e.g : 10m or 1h or 2d etc. - * @param conf - cluster configuration - * @param timeInterval - trace time interval. - * @param tracesPath - MR traces path. - * @return - MR paths as a list for a given time interval. - * @throws IOException - If an I/O error occurs.
- */ - public static List listMRTracesByTime(Configuration conf, - String timeInterval, Path tracesPath) throws IOException { - List jobTraces = new ArrayList(); - final FileSystem fs = FileSystem.getLocal(conf); - final FileStatus fstat[] = fs.listStatus(tracesPath); - for (FileStatus fst : fstat) { - final String fileName = fst.getPath().getName(); - if (fileName.indexOf(timeInterval) >= 0) { - jobTraces.add(fst.getPath().toString()); - } - } - return jobTraces; - } - - /** - * Gives the list of MR traces for given time interval. - * The time interval should be following convention. - * Syntax : <numeric>[m|h|d] - * e.g : 10m or 1h or 2d etc. - * @param conf - cluster configuration - * @param timeInterval - trace time interval. - * @return - MR paths as a list for a given time interval. - * @throws IOException - If an I/O error occurs. - */ - public static List listMRTracesByTime(Configuration conf, - String timeInterval) throws IOException { - return listMRTracesByTime(conf, timeInterval, DEFAULT_TRACES_PATH); - } -} diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz deleted file mode 100644 index c1458361904..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz deleted file mode 100644 index 7bf17a06d21..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz deleted file mode 100644 index a72e41f4401..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz deleted file mode 100644 index 4e5615f94ad..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz deleted file mode 100644 index faba98bcdf1..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz deleted file mode 100644 index 5adbf43b057..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz and /dev/null differ diff --git 
a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz deleted file mode 100644 index cdff79a9a74..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz deleted file mode 100644 index 211773857da..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz deleted file mode 100644 index b230610515f..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz deleted file mode 100644 index 7b93b0753f0..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz deleted file mode 100644 index 7bdd31366a5..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz deleted file mode 100644 index 04fd7056f2c..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz deleted file mode 100644 index 74742fc4d29..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz deleted file mode 100644 index c178761793c..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz deleted file mode 100644 index 9a53ad219bb..00000000000 Binary files 
a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz deleted file mode 100644 index 43a181a3ff5..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz deleted file mode 100644 index fa3d791dccc..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz deleted file mode 100644 index ee009eda82a..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz deleted file mode 100644 index c11a148dff5..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz deleted file mode 100644 index aa172529a0d..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz deleted file mode 100644 index 39e90d291c1..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case3.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz deleted file mode 100644 index 229d8d321bc..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case4.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz deleted file mode 100644 index 5f7fcab1d55..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case1.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz 
b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz deleted file mode 100644 index d0ea21e6b4d..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/mem_emul_case2.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz deleted file mode 100644 index 2be6f37e4cb..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_10m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz deleted file mode 100644 index 7850026d388..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_12m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz deleted file mode 100644 index 21bff55ca84..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_1m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz deleted file mode 100644 index a27241e08c9..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_3m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz deleted file mode 100644 index 441ca3a199b..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_5m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz b/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz deleted file mode 100644 index 4aab5a1309f..00000000000 Binary files a/hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/trace_7m.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj deleted file mode 100644 index 05b613573e4..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JTProtocolAspect.aj +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import org.apache.hadoop.mapreduce.protocol.ClientProtocol; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TTInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; - -/** - * Aspect which injects the basic protocol functionality which is to be - * implemented by all the services which implement {@link ClientProtocol} - * - * Aspect also injects default implementation for the {@link JTProtocol} - */ - -public aspect JTProtocolAspect { - - // Make the ClientProtocl extend the JTprotocol - declare parents : ClientProtocol extends JTProtocol; - - /* - * Start of default implementation of the methods in JTProtocol - */ - - public Configuration JTProtocol.getDaemonConf() throws IOException { - return null; - } - - public JobInfo JTProtocol.getJobInfo(JobID jobID) throws IOException { - return null; - } - - public TaskInfo JTProtocol.getTaskInfo(TaskID taskID) throws IOException { - return null; - } - - public TTInfo JTProtocol.getTTInfo(String trackerName) throws IOException { - return null; - } - - public JobInfo[] JTProtocol.getAllJobInfo() throws IOException { - return null; - } - - public TaskInfo[] JTProtocol.getTaskInfo(JobID jobID) throws IOException { - return null; - } - - public TTInfo[] JTProtocol.getAllTTInfo() throws IOException { - return null; - } - - public boolean JTProtocol.isJobRetired(JobID jobID) throws IOException { - return false; - } - - public String JTProtocol.getJobHistoryLocationForRetiredJob(JobID jobID) throws IOException { - return ""; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj deleted file mode 100644 index 49df8a2b0c0..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobClientAspect.aj +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.protocol.ClientProtocol; - -public privileged aspect JobClientAspect { - - public ClientProtocol JobClient.getProtocol() { - return cluster.getClientProtocol(); - } - - public void JobClient.killJob(JobID id) throws IOException,InterruptedException { - cluster.getClientProtocol().killJob( - org.apache.hadoop.mapred.JobID.downgrade(id)); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj deleted file mode 100644 index ecfd8e95690..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobInProgressAspect.aj +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.jobhistory.JobHistory; -import org.apache.hadoop.mapreduce.test.system.JobInfo; - -/** - * Aspect to add a utility method in the JobInProgress for easing up the - * construction of the JobInfo object. - */ -privileged aspect JobInProgressAspect { - - /** - * Returns a read only view of the JobInProgress object which is used by the - * client. - * - * @return JobInfo of the current JobInProgress object - */ - public JobInfo JobInProgress.getJobInfo() { - String historyLoc = getHistoryPath(); - boolean isHistoryFileCopied = - this.status.getHistoryFile() == null ? 
false : true; - if (tasksInited.get()) { - return new JobInfoImpl( - this.getJobID(), this.isSetupLaunched(), this.isSetupFinished(), this - .isCleanupLaunched(), this.runningMaps(), this.runningReduces(), - this.pendingMaps(), this.pendingReduces(), this.finishedMaps(), this - .finishedReduces(), this.getStatus(), historyLoc, this - .getBlackListedTrackers(), false, this.numMapTasks, - this.numReduceTasks, isHistoryFileCopied); - } else { - return new JobInfoImpl( - this.getJobID(), false, false, false, 0, 0, this.pendingMaps(), this - .pendingReduces(), this.finishedMaps(), this.finishedReduces(), - this.getStatus(), historyLoc, this.getBlackListedTrackers(), this - .isComplete(), this.numMapTasks, this.numReduceTasks, false); - } - } - - private String JobInProgress.getHistoryPath() { - String historyLoc = ""; - if (this.isComplete()) { - historyLoc = this.getStatus().getHistoryFile(); - } else { - Path jobHistoryDirectory = this.jobHistory.getJobHistoryLocation(); - Path historypath = - JobHistory.getJobHistoryFile( - jobHistoryDirectory, this.getJobID(), this.profile.getUser()); - historyLoc = historypath.toString(); - } - return historyLoc; - } - -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj deleted file mode 100644 index dc005d783c5..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.List; -import java.util.ArrayList; -import java.util.Set; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TTInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.system.DaemonProtocol; - -/** - * Aspect class which injects the code for {@link JobTracker} class. - * - */ -public privileged aspect JobTrackerAspect { - - - public Configuration JobTracker.getDaemonConf() throws IOException { - return conf; - } - /** - * Method to get the read only view of the job and its associated information. - * - * @param jobID - * id of the job for which information is required. 
- * @return JobInfo of the job requested - * @throws IOException - */ - public JobInfo JobTracker.getJobInfo(JobID jobID) throws IOException { - JobInProgress jip = jobs.get(org.apache.hadoop.mapred.JobID - .downgrade(jobID)); - if (jip == null) { - LOG.warn("No job present for : " + jobID); - return null; - } - JobInfo info; - synchronized (jip) { - info = jip.getJobInfo(); - } - return info; - } - - /** - * Method to get the read only view of the task and its associated - * information. - * - * @param taskID - * @return - * @throws IOException - */ - public TaskInfo JobTracker.getTaskInfo(TaskID taskID) throws IOException { - TaskInProgress tip = getTip(org.apache.hadoop.mapred.TaskID - .downgrade(taskID)); - - if (tip == null) { - LOG.warn("No task present for : " + taskID); - return null; - } - return getTaskInfo(tip); - } - - public TTInfo JobTracker.getTTInfo(String trackerName) throws IOException { - org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker tt = taskTrackers - .get(trackerName); - if (tt == null) { - LOG.warn("No task tracker with name : " + trackerName + " found"); - return null; - } - TaskTrackerStatus status = tt.getStatus(); - TTInfo info = new TTInfoImpl(status.trackerName, status); - return info; - } - - // XXX Below two method don't reuse getJobInfo and getTaskInfo as there is a - // possibility that retire job can run and remove the job from JT memory - // during - // processing of the RPC call. - public JobInfo[] JobTracker.getAllJobInfo() throws IOException { - List infoList = new ArrayList(); - synchronized (jobs) { - for (JobInProgress jip : jobs.values()) { - JobInfo info = jip.getJobInfo(); - infoList.add(info); - } - } - return (JobInfo[]) infoList.toArray(new JobInfo[infoList.size()]); - } - - public TaskInfo[] JobTracker.getTaskInfo(JobID jobID) throws IOException { - JobInProgress jip = jobs.get(org.apache.hadoop.mapred.JobID - .downgrade(jobID)); - if (jip == null) { - LOG.warn("Unable to find job : " + jobID); - return null; - } - List infoList = new ArrayList(); - synchronized (jip) { - for (TaskInProgress tip : jip.setup) { - infoList.add(getTaskInfo(tip)); - } - for (TaskInProgress tip : jip.maps) { - infoList.add(getTaskInfo(tip)); - } - for (TaskInProgress tip : jip.reduces) { - infoList.add(getTaskInfo(tip)); - } - for (TaskInProgress tip : jip.cleanup) { - infoList.add(getTaskInfo(tip)); - } - } - return (TaskInfo[]) infoList.toArray(new TaskInfo[infoList.size()]); - } - - public TTInfo[] JobTracker.getAllTTInfo() throws IOException { - List infoList = new ArrayList(); - synchronized (taskTrackers) { - for (TaskTracker tt : taskTrackers.values()) { - TaskTrackerStatus status = tt.getStatus(); - TTInfo info = new TTInfoImpl(status.trackerName, status); - infoList.add(info); - } - } - return (TTInfo[]) infoList.toArray(new TTInfo[infoList.size()]); - } - - public boolean JobTracker.isJobRetired(JobID id) throws IOException { - return retireJobs.get( - org.apache.hadoop.mapred.JobID.downgrade(id))!=null?true:false; - } - - public String JobTracker.getJobHistoryLocationForRetiredJob( - JobID id) throws IOException { - String historyFile = this.getJobStatus(id).getHistoryFile(); - if(historyFile == null) { - throw new IOException("The retired job information for the job : " - + id +" is not found"); - } else { - return historyFile; - } - } - pointcut getVersionAspect(String protocol, long clientVersion) : - execution(public long JobTracker.getProtocolVersion(String , - long) throws IOException) && args(protocol, clientVersion); - - long 
around(String protocol, long clientVersion) : - getVersionAspect(protocol, clientVersion) { - if (protocol.equals(DaemonProtocol.class.getName())) { - return DaemonProtocol.versionID; - } else if (protocol.equals(JTProtocol.class.getName())) { - return JTProtocol.versionID; - } else { - return proceed(protocol, clientVersion); - } - } - - /** - * Point cut which monitors for the start of the jobtracker and sets the right - * value if the jobtracker is started. - */ - pointcut jtConstructorPointCut() : - call(JobTracker.new(..)); - - after() returning (JobTracker tracker): jtConstructorPointCut() { - try { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - tracker.setUser(ugi.getShortUserName()); - } catch (IOException e) { - tracker.LOG.warn("Unable to get the user information for the " - + "Jobtracker"); - } - tracker.setReady(true); - } - - private TaskInfo JobTracker.getTaskInfo(TaskInProgress tip) { - TaskStatus[] status = tip.getTaskStatuses(); - if (status == null) { - if (tip.isMapTask()) { - status = new MapTaskStatus[]{}; - } - else { - status = new ReduceTaskStatus[]{}; - } - } - String[] trackers = - (String[]) (tip.getActiveTasks().values()).toArray(new String[tip - .getActiveTasks().values().size()]); - TaskInfo info = - new TaskInfoImpl(tip.getTIPId(), tip.getProgress(), tip - .getActiveTasks().size(), tip.numKilledTasks(), tip - .numTaskFailures(), status, (tip.isJobSetupTask() || tip - .isJobCleanupTask()), trackers); - return info; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj deleted file mode 100644 index 482e7d49a66..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.util.ArrayList; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapreduce.test.system.TTProtocol; -import org.apache.hadoop.security.authorize.Service; -import org.apache.hadoop.test.system.DaemonProtocol; - -/** - * This aspect adds two MR specific Herriot protocols tp the list of - * 'authorized' Herriot protocols. Protocol descriptors i.e. 
- * 'security.tt.protocol.acl' have to be added to hadoop-policy.xml - * if present - */ -public privileged aspect MapReducePolicyProviderAspect { - private static final Log LOG = LogFactory - .getLog(MapReducePolicyProviderAspect.class); - ArrayList herriotMRServices = null; - - pointcut updateMRServices() : - execution (public Service[] MapReducePolicyProvider.getServices()); - - Service[] around() : updateMRServices () { - herriotMRServices = new ArrayList(); - for (Service s : MapReducePolicyProvider.mapReduceServices) { - LOG.debug("Copying configured protocol to " - + s.getProtocol().getCanonicalName()); - herriotMRServices.add(s); - } - herriotMRServices.add(new Service("security.daemon.protocol.acl", - DaemonProtocol.class)); - herriotMRServices.add(new Service("security.tt.protocol.acl", - TTProtocol.class)); - final Service[] retArray = herriotMRServices - .toArray(new Service[herriotMRServices.size()]); - LOG.debug("Number of configured protocols to return: " + retArray.length); - return retArray; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj deleted file mode 100644 index 8c3326bf60c..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.mapred.Task.TaskReporter; -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.test.system.ControlAction; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.mapreduce.test.system.TTProtocol; - -public privileged aspect TaskAspect { - - private static final Log LOG = LogFactory.getLog(TaskAspect.class); - - private Object waitObject = new Object(); - private AtomicBoolean isWaitingForSignal = new AtomicBoolean(false); - - private DaemonProtocol daemonProxy; - - pointcut taskDoneIntercept(Task task) : execution( - public void Task.done(..)) && target(task); - - void around(Task task) : taskDoneIntercept(task) { - if(task.isJobCleanupTask() || task.isJobSetupTask() || task.isTaskCleanupTask()) { - proceed(task); - return; - } - Configuration conf = task.getConf(); - boolean controlEnabled = FinishTaskControlAction.isControlActionEnabled(conf); - if(controlEnabled) { - LOG.info("Task control enabled, waiting till client sends signal to " + - "complete"); - try { - synchronized (waitObject) { - isWaitingForSignal.set(true); - waitObject.wait(); - } - } catch (InterruptedException e) { - } - } - proceed(task); - return; - } - - pointcut taskStatusUpdate(TaskReporter reporter, TaskAttemptID id) : - call(public boolean TaskUmbilicalProtocol.ping(TaskAttemptID)) - && this(reporter) && args(id); - - after(TaskReporter reporter, TaskAttemptID id) throws IOException : - taskStatusUpdate(reporter, id) { - synchronized (waitObject) { - if(isWaitingForSignal.get()) { - ControlAction[] actions = daemonProxy.getActions( - id.getTaskID()); - if(actions.length == 0) { - return; - } - boolean shouldProceed = false; - for(ControlAction action : actions) { - if (action instanceof FinishTaskControlAction) { - LOG.info("Recv : Control task action to finish task id: " - + action.getTarget()); - shouldProceed = true; - daemonProxy.removeAction(action); - LOG.info("Removed the control action from TaskTracker"); - break; - } - } - if(shouldProceed) { - LOG.info("Notifying the task to completion"); - waitObject.notify(); - } - } - } - } - - - pointcut rpcInterceptor(Class k, long version,InetSocketAddress addr, - Configuration conf) : call( - public static * RPC.getProxy(Class, long ,InetSocketAddress, - Configuration)) && args(k, version,addr, conf) && - within(org.apache.hadoop.mapred.Child) ; - - after(Class k, long version, InetSocketAddress addr, Configuration conf) - throws IOException : rpcInterceptor(k, version, addr, conf) { - daemonProxy = - (TTProtocol) RPC.getProxy( - TTProtocol.class, TTProtocol.versionID, addr, conf); - } - -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj deleted file mode 100644 index 51bcdb70e6a..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.List; -import java.util.ArrayList; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.test.system.TTProtocol; -import org.apache.hadoop.mapreduce.test.system.TTTaskInfo; -import org.apache.hadoop.mapred.TTTaskInfoImpl.MapTTTaskInfo; -import org.apache.hadoop.mapred.TTTaskInfoImpl.ReduceTTTaskInfo; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; -import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; -import org.apache.hadoop.mapreduce.TaskAttemptID; - -public privileged aspect TaskTrackerAspect { - - declare parents : TaskTracker implements TTProtocol; - - // Add a last sent status field to the Tasktracker class. - TaskTrackerStatus TaskTracker.lastSentStatus = null; - public static String TaskTracker.TASKJARDIR = TaskTracker.JARSDIR; - - public synchronized TaskTrackerStatus TaskTracker.getStatus() - throws IOException { - return lastSentStatus; - } - - public Configuration TaskTracker.getDaemonConf() throws IOException { - return fConf; - } - - public TTTaskInfo[] TaskTracker.getTasks() throws IOException { - List infoList = new ArrayList(); - synchronized (tasks) { - for (TaskInProgress tip : tasks.values()) { - TTTaskInfo info = getTTTaskInfo(tip); - infoList.add(info); - } - } - return (TTTaskInfo[]) infoList.toArray(new TTTaskInfo[infoList.size()]); - } - - public TTTaskInfo TaskTracker.getTask(org.apache.hadoop.mapreduce.TaskID id) - throws IOException { - TaskID old = org.apache.hadoop.mapred.TaskID.downgrade(id); - synchronized (tasks) { - for(TaskAttemptID ta : tasks.keySet()) { - if(old.equals(ta.getTaskID())) { - return getTTTaskInfo(tasks.get(ta)); - } - } - } - return null; - } - - private TTTaskInfo TaskTracker.getTTTaskInfo(TaskInProgress tip) { - TTTaskInfo info; - if (tip.task.isMapTask()) { - info = new MapTTTaskInfo(tip.slotTaken, tip.wasKilled, - (MapTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask() - .getUser(), tip.getTask().isTaskCleanupTask(), getPid(tip.getTask().getTaskID())); - } else { - info = new ReduceTTTaskInfo(tip.slotTaken, tip.wasKilled, - (ReduceTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask() - .getUser(), tip.getTask().isTaskCleanupTask(),getPid(tip.getTask().getTaskID())); - } - return info; - } - - before(TaskTrackerStatus newStatus, TaskTracker tracker) : - set(TaskTrackerStatus TaskTracker.status) - && args(newStatus) && this(tracker) { - if (newStatus == null) { - tracker.lastSentStatus = tracker.status; - } - } - - pointcut ttConstructorPointCut(JobConf conf) : - call(TaskTracker.new(JobConf)) - && args(conf); - - after(JobConf conf) returning 
(TaskTracker tracker): - ttConstructorPointCut(conf) { - try { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - tracker.setUser(ugi.getShortUserName()); - } catch (IOException e) { - tracker.LOG.warn("Unable to get the user information for the " + - "Jobtracker"); - } - tracker.setReady(true); - } - - pointcut getVersionAspect(String protocol, long clientVersion) : - execution(public long TaskTracker.getProtocolVersion(String , - long) throws IOException) && args(protocol, clientVersion); - - long around(String protocol, long clientVersion) : - getVersionAspect(protocol, clientVersion) { - if(protocol.equals(DaemonProtocol.class.getName())) { - return DaemonProtocol.versionID; - } else if(protocol.equals(TTProtocol.class.getName())) { - return TTProtocol.versionID; - } else { - return proceed(protocol, clientVersion); - } - } - - public boolean TaskTracker.isProcessTreeAlive(String pid) throws IOException { - // Command to be executed is as follows : - // ps -o pid,ppid,sid,command -e | grep -v ps | grep -v grep | grep - // "$pid" - String checkerCommand = - getDaemonConf().get( - "test.system.processgroup_checker_command", - "ps -o pid,ppid,sid,command -e " - + "| grep -v ps | grep -v grep | grep \"$"); - String[] command = - new String[] { "bash", "-c", checkerCommand + pid + "\"" }; - ShellCommandExecutor shexec = new ShellCommandExecutor(command); - try { - shexec.execute(); - } catch (Shell.ExitCodeException e) { - TaskTracker.LOG - .info("The process tree grep threw a exitcode exception pointing " - + "to process tree not being alive."); - return false; - } - TaskTracker.LOG.info("The task grep command is : " - + shexec.toString() + " the output from command is : " - + shexec.getOutput()); - return true; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj b/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj deleted file mode 100644 index 04b1537cdf2..00000000000 --- a/hadoop-mapreduce-project/src/test/system/aop/org/apache/hadoop/mapreduce/ClusterAspect.aj +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapreduce; - -import org.apache.hadoop.mapreduce.protocol.ClientProtocol; - -public privileged aspect ClusterAspect { - - public ClientProtocol Cluster.getClientProtocol() { - return client; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml b/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml deleted file mode 100644 index ff43e57f95b..00000000000 --- a/hadoop-mapreduce-project/src/test/system/conf/system-test-mapred.xml +++ /dev/null @@ -1,133 +0,0 @@ - - - - - - - - - - test.system.hdrc.hadoophome - $(TO_DO_HADOOP_INSTALL)/share/hadoop-current - This is the path to the home directory of the hadoop deployment. - - - - test.system.hdrc.hadoopconfdir - $(TO_DO_HADOOP_INSTALL)/conf/hadoop - This is the path to the configuration directory of the hadoop - cluster that is deployed. - - - - - test.system.hdrc.tt.hostfile - slaves.localcopy.txt - File name containing the hostnames where the TaskTrackers are running. - - - - - test.system.mr.clusterprocess.impl.class - org.apache.hadoop.mapreduce.test.system.MRCluster$MRProcessManager - - Cluster process manager for the Mapreduce subsystem of the cluster. The value - org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager can - be used to enable multi user support. - - - - - test.system.hdrc.deployed.scripts.dir - ./src/test/system/scripts - - This directory hosts the scripts in the deployed location where - the system test client runs. - - - - - test.system.hdrc.hadoopnewconfdir - $(TO_DO_GLOBAL_TMP_DIR)/newconf - - The directory where the new config files will be copied to in all - the clusters is pointed out this directory. - - - - - test.system.hdrc.suspend.cmd - kill -SIGSTOP - - Command for suspending the given process. - - - - - test.system.hdrc.resume.cmd - kill -SIGCONT - - Command for resuming the given suspended process. - - - - test.system.hdrc.hadoop.local.confdir - $(TO_DO_GLOBAL_TMP_DIR)/localconf - - A local directory where a new config file is placed before - being pushed into new config location on the cluster. - - - - - - - test.system.mr.clusterprocess.impl.class - org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager - - Enabling multi user based cluster process manger. - - - - - test.system.hdrc.multi-user.list.path - $(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers - - Multi user list for creating the proxy users. - - - - - test.system.hdrc.multi-user.binary.path - $(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs - - Local file system path on gate way to cluster-controller binary including the binary name. - To build the binary the following commands need to be executed: - % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster) - % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path - Location of the cluster is important security precaution. - The binary should be owned by root and test user group permission should be set such a - way that it can be executed by binary. Example usage would be: - % sudo chown root binary - % sudo chmod 6511 binary - Change permission appropriately to make it more secure. 
- - - - - test.system.hdrc.multi-user.managinguser.jobtracker - * - - User value for managing the particular daemon, please note that these user should be - present on gateways also, an example configuration for the above would be - key name = test.system.hdrc.multi-user.managinguser.jobtracker - key value = guest - Please note the daemon names are all lower case, corresponding to hadoop-daemon.sh command. - - - - test.system.hdrc.multi-user.managinguser.tasktracker - * - - - diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java deleted file mode 100644 index 28b2e72637c..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/JobInfoImpl.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; - -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.test.system.JobInfo; - -/** - * Concrete implementation of the JobInfo interface which is exposed to the - * clients. - * Look at {@link JobInfo} for further details. 
- */ -class JobInfoImpl implements JobInfo { - - private List blackListedTracker; - private String historyUrl; - private JobID id; - private boolean setupLaunched; - private boolean setupFinished; - private boolean cleanupLaunched; - private JobStatus status; - private int runningMaps; - private int runningReduces; - private int waitingMaps; - private int waitingReduces; - private int finishedMaps; - private int finishedReduces; - private int numMaps; - private int numReduces; - private boolean historyCopied; - - public JobInfoImpl() { - id = new JobID(); - status = new JobStatus(); - blackListedTracker = new LinkedList(); - historyUrl = ""; - } - - public JobInfoImpl( - JobID id, boolean setupLaunched, boolean setupFinished, - boolean cleanupLaunched, int runningMaps, int runningReduces, - int waitingMaps, int waitingReduces, int finishedMaps, - int finishedReduces, JobStatus status, String historyUrl, - List blackListedTracker, boolean isComplete, int numMaps, - int numReduces, boolean historyCopied) { - super(); - this.blackListedTracker = blackListedTracker; - this.historyUrl = historyUrl; - this.id = id; - this.setupLaunched = setupLaunched; - this.setupFinished = setupFinished; - this.cleanupLaunched = cleanupLaunched; - this.status = status; - this.runningMaps = runningMaps; - this.runningReduces = runningReduces; - this.waitingMaps = waitingMaps; - this.waitingReduces = waitingReduces; - this.finishedMaps = finishedMaps; - this.finishedReduces = finishedReduces; - this.numMaps = numMaps; - this.numReduces = numReduces; - this.historyCopied = historyCopied; - } - - @Override - public List getBlackListedTrackers() { - return blackListedTracker; - } - - @Override - public String getHistoryUrl() { - return historyUrl; - } - - @Override - public JobID getID() { - return id; - } - - @Override - public JobStatus getStatus() { - return status; - } - - @Override - public boolean isCleanupLaunched() { - return cleanupLaunched; - } - - @Override - public boolean isSetupLaunched() { - return setupLaunched; - } - - @Override - public boolean isSetupFinished() { - return setupFinished; - } - - @Override - public int runningMaps() { - return runningMaps; - } - - @Override - public int runningReduces() { - return runningReduces; - } - - @Override - public int waitingMaps() { - return waitingMaps; - } - - @Override - public int waitingReduces() { - return waitingReduces; - } - - @Override - public int finishedMaps() { - return finishedMaps; - } - - @Override - public int finishedReduces() { - return finishedReduces; - } - - @Override - public int numMaps() { - return numMaps; - } - - @Override - public int numReduces() { - return numReduces; - } - - @Override - public boolean isHistoryFileCopied() { - return historyCopied; - } - - @Override - public void readFields(DataInput in) throws IOException { - id.readFields(in); - setupLaunched = in.readBoolean(); - setupFinished = in.readBoolean(); - cleanupLaunched = in.readBoolean(); - status.readFields(in); - runningMaps = in.readInt(); - runningReduces = in.readInt(); - waitingMaps = in.readInt(); - waitingReduces = in.readInt(); - historyUrl = in.readUTF(); - int size = in.readInt(); - for (int i = 0; i < size; i++) { - blackListedTracker.add(in.readUTF()); - } - finishedMaps = in.readInt(); - finishedReduces = in.readInt(); - numMaps = in.readInt(); - numReduces = in.readInt(); - historyCopied = in.readBoolean(); - } - - @Override - public void write(DataOutput out) throws IOException { - id.write(out); - out.writeBoolean(setupLaunched); - 
out.writeBoolean(setupFinished); - out.writeBoolean(cleanupLaunched); - status.write(out); - out.writeInt(runningMaps); - out.writeInt(runningReduces); - out.writeInt(waitingMaps); - out.writeInt(waitingReduces); - out.writeUTF(historyUrl); - out.writeInt(blackListedTracker.size()); - for (String str : blackListedTracker) { - out.writeUTF(str); - } - out.writeInt(finishedMaps); - out.writeInt(finishedReduces); - out.writeInt(numMaps); - out.writeInt(numReduces); - out.writeBoolean(historyCopied); - } - - -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java deleted file mode 100644 index d17e171dc3a..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTInfoImpl.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.mapred.TaskTrackerStatus; -import org.apache.hadoop.mapreduce.test.system.TTInfo; - -/** - * Concrete implementation of the TaskTracker information which is passed to - * the client from JobTracker. - * Look at {@link TTInfo} - */ - -class TTInfoImpl implements TTInfo { - - private String taskTrackerName; - private TaskTrackerStatus status; - - public TTInfoImpl() { - taskTrackerName = ""; - status = new TaskTrackerStatus(); - } - - public TTInfoImpl(String taskTrackerName, TaskTrackerStatus status) { - super(); - this.taskTrackerName = taskTrackerName; - this.status = status; - } - - @Override - public String getName() { - return taskTrackerName; - } - - @Override - public TaskTrackerStatus getStatus() { - return status; - } - - @Override - public void readFields(DataInput in) throws IOException { - taskTrackerName = in.readUTF(); - status.readFields(in); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeUTF(taskTrackerName); - status.write(out); - } - -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java deleted file mode 100644 index ed279dea7a7..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.test.system.TTTaskInfo; -/** - * Abstract class which passes the Task view of the TaskTracker to the client. - * See {@link TTInfoImpl} for further details. - * - */ -abstract class TTTaskInfoImpl implements TTTaskInfo { - - private boolean slotTaken; - private boolean wasKilled; - TaskStatus status; - Configuration conf; - String user; - boolean isTaskCleanupTask; - private String pid; - - public TTTaskInfoImpl() { - } - - public TTTaskInfoImpl(boolean slotTaken, boolean wasKilled, - TaskStatus status, Configuration conf, String user, - boolean isTaskCleanupTask, String pid) { - super(); - this.slotTaken = slotTaken; - this.wasKilled = wasKilled; - this.status = status; - this.conf = conf; - this.user = user; - this.isTaskCleanupTask = isTaskCleanupTask; - this.pid = pid; - } - - @Override - public boolean slotTaken() { - return slotTaken; - } - - @Override - public boolean wasKilled() { - return wasKilled; - } - - @Override - public abstract TaskStatus getTaskStatus(); - - @Override - public Configuration getConf() { - return conf; - } - - @Override - public String getUser() { - return user; - } - - @Override - public boolean isTaskCleanupTask() { - return isTaskCleanupTask; - } - - @Override - public String getPid() { - return pid; - } - - @Override - public void readFields(DataInput in) throws IOException { - slotTaken = in.readBoolean(); - wasKilled = in.readBoolean(); - conf = new Configuration(); - conf.readFields(in); - user = in.readUTF(); - isTaskCleanupTask = in.readBoolean(); - pid = in.readUTF(); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeBoolean(slotTaken); - out.writeBoolean(wasKilled); - conf.write(out); - out.writeUTF(user); - out.writeBoolean(isTaskCleanupTask); - if (pid != null) { - out.writeUTF(pid); - } else { - out.writeUTF(""); - } - status.write(out); - } - - static class MapTTTaskInfo extends TTTaskInfoImpl { - - public MapTTTaskInfo() { - super(); - } - - public MapTTTaskInfo(boolean slotTaken, boolean wasKilled, - MapTaskStatus status, Configuration conf, String user, - boolean isTaskCleanup,String pid) { - super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid); - } - - @Override - public TaskStatus getTaskStatus() { - return status; - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - status = new MapTaskStatus(); - status.readFields(in); - } - } - - static class ReduceTTTaskInfo extends TTTaskInfoImpl { - - public ReduceTTTaskInfo() { - super(); - } - - public ReduceTTTaskInfo(boolean slotTaken, boolean wasKilled, - ReduceTaskStatus status, Configuration conf, String user, - boolean isTaskCleanup, String pid) { - super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid); - 
} - - @Override - public TaskStatus getTaskStatus() { - return status; - } - - public void readFields(DataInput in) throws IOException { - super.readFields(in); - status = new ReduceTaskStatus(); - status.readFields(in); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java deleted file mode 100644 index 6871203afc9..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapred/TaskInfoImpl.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.mapred.TaskStatus; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; - -/** - * Concrete class to expose out the task related information to the Clients from - * the JobTracker. Look at {@link TaskInfo} for further details. 
- */ -class TaskInfoImpl implements TaskInfo { - - private double progress; - private TaskID taskID; - private int killedAttempts; - private int failedAttempts; - private int runningAttempts; - private TaskStatus[] taskStatus; - private boolean setupOrCleanup; - private String[] taskTrackers; - - public TaskInfoImpl() { - taskID = new TaskID(); - } - - public TaskInfoImpl( - TaskID taskID, double progress, int runningAttempts, int killedAttempts, - int failedAttempts, TaskStatus[] taskStatus, boolean setupOrCleanup, - String[] taskTrackers) { - this.progress = progress; - this.taskID = taskID; - this.killedAttempts = killedAttempts; - this.failedAttempts = failedAttempts; - this.runningAttempts = runningAttempts; - if (taskStatus != null) { - this.taskStatus = taskStatus; - } else { - if (taskID.getTaskType() == TaskType.MAP) { - this.taskStatus = new MapTaskStatus[] {}; - } else { - this.taskStatus = new ReduceTaskStatus[] {}; - } - } - this.setupOrCleanup = setupOrCleanup; - this.taskTrackers = taskTrackers; - } - - @Override - public double getProgress() { - return progress; - } - - @Override - public TaskID getTaskID() { - return taskID; - } - - @Override - public int numKilledAttempts() { - return killedAttempts; - } - - @Override - public int numFailedAttempts() { - return failedAttempts; - } - - @Override - public int numRunningAttempts() { - return runningAttempts; - } - - @Override - public void readFields(DataInput in) throws IOException { - taskID.readFields(in); - progress = in.readDouble(); - runningAttempts = in.readInt(); - killedAttempts = in.readInt(); - failedAttempts = in.readInt(); - int size = in.readInt(); - if (taskID.getTaskType() == TaskType.MAP) { - taskStatus = new MapTaskStatus[size]; - } else { - taskStatus = new ReduceTaskStatus[size]; - } - for (int i = 0; i < size; i++) { - if (taskID.getTaskType() == TaskType.MAP) { - taskStatus[i] = new MapTaskStatus(); - } else { - taskStatus[i] = new ReduceTaskStatus(); - } - taskStatus[i].readFields(in); - taskStatus[i].setTaskTracker(in.readUTF()); - } - setupOrCleanup = in.readBoolean(); - size = in.readInt(); - taskTrackers = new String[size]; - for (int i = 0; i < size; i++) { - taskTrackers[i] = in.readUTF(); - } - } - - @Override - public void write(DataOutput out) throws IOException { - taskID.write(out); - out.writeDouble(progress); - out.writeInt(runningAttempts); - out.writeInt(killedAttempts); - out.writeInt(failedAttempts); - out.writeInt(taskStatus.length); - for (TaskStatus t : taskStatus) { - t.write(out); - out.writeUTF(t.getTaskTracker()); - } - out.writeBoolean(setupOrCleanup); - out.writeInt(taskTrackers.length); - for (String tt : taskTrackers) { - out.writeUTF(tt); - } - } - - @Override - public TaskStatus[] getTaskStatus() { - return taskStatus; - } - - @Override - public boolean isSetupOrCleanup() { - return setupOrCleanup; - } - - @Override - public String[] getTaskTrackers() { - return taskTrackers; - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java deleted file mode 100644 index 64677b8d38e..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/FinishTaskControlAction.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.TaskID; -import org.apache.hadoop.test.system.ControlAction; - -/** - * Control Action which signals a controlled task to proceed to completion.
- */ -public class FinishTaskControlAction extends ControlAction<TaskID> { - - private static final String ENABLE_CONTROLLED_TASK_COMPLETION = - "test.system.enabled.task.completion.control"; - - /** - * Create a default control action.
- * - */ - public FinishTaskControlAction() { - super(new TaskID()); - } - - /** - * Create a control action specific to a particular task.
- * - * @param id - * of the task. - */ - public FinishTaskControlAction(TaskID id) { - super(id); - } - - /** - * Sets up the job to be controlled using the finish task control action. - *
- * - * @param conf - * configuration to be used to submit the job. - */ - public static void configureControlActionForJob(Configuration conf) { - conf.setBoolean(ENABLE_CONTROLLED_TASK_COMPLETION, true); - } - - /** - * Checks if the control action is enabled in the passed configuration.
- * @param conf configuration - * @return true if action is enabled. - */ - public static boolean isControlActionEnabled(Configuration conf) { - return conf.getBoolean(ENABLE_CONTROLLED_TASK_COMPLETION, false); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java deleted file mode 100644 index 8c9146c8294..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTClient.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.io.IOException; - -import junit.framework.Assert; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.RunningJob; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.test.system.process.RemoteProcess; -import org.apache.hadoop.mapred.TaskStatus; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import static org.junit.Assert.*; - -/** - * JobTracker client for system tests. - */ -public class JTClient extends MRDaemonClient { - static final Log LOG = LogFactory.getLog(JTClient.class); - private JobClient client; - private static final String HADOOP_JT_OPTS_ENV = "HADOOP_JOBTRACKER_OPTS"; - - /** - * Create JobTracker client to talk to {@link JobTracker} specified in the - * configuration.
- * - * @param conf - * configuration used to create a client. - * @param daemon - * the process management instance for the {@link JobTracker} - * @throws IOException - */ - public JTClient(Configuration conf, RemoteProcess daemon) throws IOException { - super(conf, daemon); - } - - @Override - public synchronized void connect() throws IOException { - if (isConnected()) { - return; - } - client = new JobClient(new JobConf(getConf())); - setConnected(true); - } - - @Override - public synchronized void disconnect() throws IOException { - client.close(); - } - - @Override - public synchronized JTProtocol getProxy() { - return (JTProtocol) client.getProtocol(); - } - - /** - * Gets the {@link JobClient} which can be used for job submission. JobClient - * which is returned would not contain the decorated API's. To be used for - * submitting of the job. - * - * @return client handle to the JobTracker - */ - public JobClient getClient() { - return client; - } - - /** - * Gets the configuration which the JobTracker is currently running.
- * - * @return configuration of JobTracker. - * - * @throws IOException - */ - public Configuration getJobTrackerConfig() throws IOException { - return getProxy().getDaemonConf(); - } - - /** - * Kills the job.
- * - * @param id - * of the job to be killed. - * @throws IOException - */ - public void killJob(JobID id) throws IOException { - try { - getClient().killJob(id); - } catch (InterruptedException e) { - throw new IOException(e); - } - } - - /** - * Verification API to check running jobs and running job states. Users have - * to ensure that their jobs remain in a running state while verification is - * called.
- * - * @param jobId - * of the job to be verified. - * - * @throws Exception - */ - public void verifyRunningJob(JobID jobId) throws Exception { - } - - private JobInfo getJobInfo(JobID jobId) throws IOException { - JobInfo info = getProxy().getJobInfo(jobId); - if (info == null && !getProxy().isJobRetired(jobId)) { - Assert.fail("Job id : " + jobId + " has never been submitted to JT"); - } - return info; - } - - /** - * Verification API to wait till job retires and verify all the retired state - * is correct.
- * - * @param job - * of the job used for completion - * @return job handle - * @throws Exception - */ - public Job submitAndVerifyJob(Job job) throws Exception { - job.submit(); - JobID jobId = job.getJobID(); - verifyRunningJob(jobId); - verifyCompletedJob(jobId); - return job; - } - - /** - * Verification API to check if the job completion state is correct.
- * - * @param id - * id of the job to be verified. - */ - - public void verifyCompletedJob(JobID id) throws Exception { - RunningJob rJob = - getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(id)); - while (!rJob.isComplete()) { - LOG.info("waiting for job :" + id + " to retire"); - Thread.sleep(1000); - rJob = getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(id)); - } - verifyJobDetails(id); - JobInfo jobInfo = getJobInfo(id); - if (jobInfo != null) { - while (!jobInfo.isHistoryFileCopied()) { - Thread.sleep(1000); - LOG.info(id + " waiting for history file to copied"); - jobInfo = getJobInfo(id); - if (jobInfo == null) { - break; - } - } - } - verifyJobHistory(id); - } - - /** - * Verification API to check if the job details are semantically correct.
- * - * @param jobId - * jobID of the job - * @return true if all the job verifications are verified to be true - * @throws Exception - */ - public void verifyJobDetails(JobID jobId) throws Exception { - // wait till the setup is launched and finished. - JobInfo jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - LOG.info("waiting for the setup to be finished"); - while (!jobInfo.isSetupFinished()) { - Thread.sleep(2000); - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - break; - } - } - // verify job id. - assertTrue(jobId.toString().startsWith("job_")); - LOG.info("verified job id and is : " + jobId.toString()); - // verify the number of map/reduce tasks. - verifyNumTasks(jobId); - // should verify job progress. - verifyJobProgress(jobId); - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - if (jobInfo.getStatus().getRunState() == JobStatus.SUCCEEDED) { - // verify if map/reduce progress reached 1. - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - assertEquals(1.0, jobInfo.getStatus().mapProgress(), 0.001); - assertEquals(1.0, jobInfo.getStatus().reduceProgress(), 0.001); - // verify successful finish of tasks. - verifyAllTasksSuccess(jobId); - } - if (jobInfo.getStatus().isJobComplete()) { - // verify if the cleanup is launched. - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - assertTrue(jobInfo.isCleanupLaunched()); - LOG.info("Verified launching of cleanup"); - } - } - - public void verifyAllTasksSuccess(JobID jobId) throws IOException { - JobInfo jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - - TaskInfo[] taskInfos = getProxy().getTaskInfo(jobId); - - if (taskInfos.length == 0 && getProxy().isJobRetired(jobId)) { - LOG.info("Job has been retired from JT memory : " + jobId); - return; - } - - for (TaskInfo taskInfo : taskInfos) { - TaskStatus[] taskStatus = taskInfo.getTaskStatus(); - if (taskStatus != null && taskStatus.length > 0) { - int i; - for (i = 0; i < taskStatus.length; i++) { - if (TaskStatus.State.SUCCEEDED.equals(taskStatus[i].getRunState())) { - break; - } - } - assertFalse(i == taskStatus.length); - } - } - LOG.info("verified that none of the tasks failed."); - } - - public void verifyJobProgress(JobID jobId) throws IOException { - JobInfo jobInfo; - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - assertTrue(jobInfo.getStatus().mapProgress() >= 0 - && jobInfo.getStatus().mapProgress() <= 1); - LOG.info("verified map progress and is " - + jobInfo.getStatus().mapProgress()); - assertTrue(jobInfo.getStatus().reduceProgress() >= 0 - && jobInfo.getStatus().reduceProgress() <= 1); - LOG.info("verified reduce progress and is " - + jobInfo.getStatus().reduceProgress()); - } - - public void verifyNumTasks(JobID jobId) throws IOException { - JobInfo jobInfo; - jobInfo = getJobInfo(jobId); - if (jobInfo == null) { - return; - } - assertEquals(jobInfo.numMaps(), (jobInfo.runningMaps() - + jobInfo.waitingMaps() + jobInfo.finishedMaps())); - LOG.info("verified number of map tasks and is " + jobInfo.numMaps()); - - assertEquals(jobInfo.numReduces(), (jobInfo.runningReduces() - + jobInfo.waitingReduces() + jobInfo.finishedReduces())); - LOG.info("verified number of reduce tasks and is " + jobInfo.numReduces()); - } - - /** - * Verification API to check if the job history file is semantically correct.
- * - * - * @param jobId - * of the job to be verified. - * @throws IOException - */ - public void verifyJobHistory(JobID jobId) throws IOException { - JobInfo info = getJobInfo(jobId); - String url = ""; - if (info == null) { - LOG.info("Job has been retired from JT memory : " + jobId); - url = getProxy().getJobHistoryLocationForRetiredJob(jobId); - } else { - url = info.getHistoryUrl(); - } - Path p = new Path(url); - if (p.toUri().getScheme().equals("file:/")) { - FileStatus st = getFileStatus(url, true); - Assert.assertNotNull("Job History file for " - + jobId + " not present " + "when job is completed", st); - } else { - FileStatus st = getFileStatus(url, false); - Assert.assertNotNull("Job History file for " - + jobId + " not present " + "when job is completed", st); - } - LOG.info("Verified the job history for the jobId : " + jobId); - } - - @Override - public String getHadoopOptsEnvName() { - return HADOOP_JT_OPTS_ENV; - } - - /** - * Concrete implementation of abstract super class method - * - * @param attributeName name of the attribute to be retrieved - * @return Object value of the given attribute - * @throws IOException is thrown in case of communication errors - */ - @Override - public Object getDaemonAttribute(String attributeName) throws IOException { - return getJmxAttribute("JobTracker", "JobTrackerInfo", attributeName); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java deleted file mode 100644 index 4e0f3c87846..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.io.IOException; - -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.test.system.DaemonProtocol; - -/** - * Client side API's exposed from JobTracker. - */ -public interface JTProtocol extends DaemonProtocol { - long versionID = 1L; - - /** - * Get the information pertaining to given job.
The returned JobInfo object can be null when the - * job specified by the job id is retired from the - * JobTracker memory, which happens after the job is - * completed.
- * - * @param id - * of the job for which information is required. - * @return information regarding the job; null if the job is - * retired from JobTracker memory. - * @throws IOException - */ - public JobInfo getJobInfo(JobID jobID) throws IOException; - - /** - * Gets the information pertaining to a task.
The returned TaskInfo object can be null when the - * task specified by the task id is retired - * from the JobTracker memory, which happens after the - * job is completed.
- * @param id - * of the task for which information is required. - * @return information regarding the task; null if the - * task is retired from JobTracker memory. - * @throws IOException - */ - public TaskInfo getTaskInfo(TaskID taskID) throws IOException; - - /** - * Gets the information pertaining to a given TaskTracker.
- * The returned TTInfo class can be null if the given TaskTracker - * information is removed from JobTracker memory which is done - * when the TaskTracker is marked lost by the JobTracker.
- * @param name - * of the tracker. - * @return information regarding the tracker null if the TaskTracker - * is marked lost by the JobTracker. - * @throws IOException - */ - public TTInfo getTTInfo(String trackerName) throws IOException; - - /** - * Gets a list of all available jobs with JobTracker.
- * - * @return list of all jobs. - * @throws IOException - */ - public JobInfo[] getAllJobInfo() throws IOException; - - /** - * Gets a list of tasks pertaining to a job.
- * - * @param id - * of the job. - * - * @return list of all tasks for the job. - * @throws IOException - */ - public TaskInfo[] getTaskInfo(JobID jobID) throws IOException; - - /** - * Gets a list of TaskTrackers which have reported to the JobTracker.
- * - * @return list of all TaskTrackers. - * @throws IOException - */ - public TTInfo[] getAllTTInfo() throws IOException; - - /** - * Checks if a given job is retired from the JobTracker's memory.
- * - * @param id - * of the job - * @return true if job is retired. - * @throws IOException - */ - boolean isJobRetired(JobID jobID) throws IOException; - - /** - * Gets the location of the history file for a retired job.
- * - * @param id - * of the job - * @return location of history file - * @throws IOException - */ - String getJobHistoryLocationForRetiredJob(JobID jobID) throws IOException; -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java deleted file mode 100644 index b5f2f924915..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/JobInfo.java +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.util.List; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.JobStatus; -import org.apache.hadoop.mapreduce.JobID; - -/** - * Job state information as seen by the JobTracker. - */ -public interface JobInfo extends Writable { - /** - * Gets the JobId of the job.
- * - * @return id of the job. - */ - JobID getID(); - - /** - * Gets the current status of the job.
- * - * @return status. - */ - JobStatus getStatus(); - - /** - * Gets the history location of the job.
- * - * @return the path to the history file. - */ - String getHistoryUrl(); - - /** - * Gets the number of maps which are currently running for the job.
- * - * @return number of maps running for the job. - */ - int runningMaps(); - - /** - * Gets the number of reduces currently running for the job.
- * - * @return number of reduces running for the job. - */ - int runningReduces(); - - /** - * Gets the number of maps to be scheduled for the job.
- * - * @return number of waiting maps. - */ - int waitingMaps(); - - /** - * Gets the number of reduces to be scheduled for the job.
- * - * @return number of waiting reduces. - */ - int waitingReduces(); - - /** - * Gets the number of maps that are finished.
- * @return the number of finished maps. - */ - int finishedMaps(); - - /** - * Gets the number of map tasks that are to be spawned for the job
- * @return - */ - int numMaps(); - - /** - * Gets the number of reduce tasks that are to be spawned for the job
- * @return - */ - int numReduces(); - - /** - * Gets the number of reduces that are finished.
- * @return the number of finished reduces. - */ - int finishedReduces(); - - /** - * Gets if cleanup for the job has been launched.
- * - * @return true if cleanup task has been launched. - */ - boolean isCleanupLaunched(); - - /** - * Gets if the setup for the job has been launched.
- * - * @return true if setup task has been launched. - */ - boolean isSetupLaunched(); - - /** - * Gets if the setup for the job has been completed.
- * - * @return true if the setup task for the job has completed. - */ - boolean isSetupFinished(); - - /** - * Gets list of blacklisted trackers for the particular job.
- * - * @return list of blacklisted tracker name. - */ - List<String> getBlackListedTrackers(); - - /** - * Gets if the history file of the job is copied to the done - * location
- * - * @return true if history file copied. - */ - boolean isHistoryFileCopied(); -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java deleted file mode 100644 index fc460cd0337..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.apache.hadoop.test.system.AbstractDaemonCluster; -import org.apache.hadoop.test.system.process.ClusterProcessManager; -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster; -import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster; -import org.apache.hadoop.test.system.process.RemoteProcess; -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo; - -/** - * Concrete AbstractDaemonCluster representing a Map-Reduce cluster. - * - */ -@SuppressWarnings("unchecked") -public class MRCluster extends AbstractDaemonCluster { - - private static final Log LOG = LogFactory.getLog(MRCluster.class); - public static final String CLUSTER_PROCESS_MGR_IMPL = - "test.system.mr.clusterprocess.impl.class"; - - /** - * Key is used to to point to the file containing hostnames of tasktrackers - */ - public static final String CONF_HADOOP_TT_HOSTFILE_NAME = - "test.system.hdrc.tt.hostfile"; - - private static List mrDaemonInfos = - new ArrayList(); - private static String TT_hostFileName; - private static String jtHostName; - private static final String SYSTEM_TEST_FILE = "system-test.xml"; - - protected enum Role {JT, TT}; - - static{ - Configuration.addDefaultResource("mapred-default.xml"); - Configuration.addDefaultResource("mapred-site.xml"); - } - - private MRCluster(Configuration conf, ClusterProcessManager rCluster) - throws IOException { - super(conf, rCluster); - } - - /** - * Factory method to create an instance of the Map-Reduce cluster.
- * - * @param conf - * contains all required parameter to create cluster. - * @return a cluster instance to be managed. - * @throws Exception - */ - public static MRCluster createCluster(Configuration conf) - throws Exception { - conf.addResource(SYSTEM_TEST_FILE); - TT_hostFileName = conf.get(CONF_HADOOP_TT_HOSTFILE_NAME, "slaves"); - String jtHostPort = conf.get(JTConfig.JT_IPC_ADDRESS); - if (jtHostPort == null) { - throw new Exception(JTConfig.JT_IPC_ADDRESS + "is not set or " - + SYSTEM_TEST_FILE + " hasn't been found."); - } - jtHostName = jtHostPort.trim().split(":")[0]; - - mrDaemonInfos.add(new HadoopDaemonInfo("jobtracker", - Role.JT, Arrays.asList(new String[]{jtHostName}))); - mrDaemonInfos.add(new HadoopDaemonInfo("tasktracker", - Role.TT, TT_hostFileName)); - - String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL); - if (implKlass == null || implKlass.isEmpty()) { - implKlass = MRProcessManager.class.getName(); - } - Class klass = (Class) Class - .forName(implKlass); - ClusterProcessManager clusterProcessMgr = klass.newInstance(); - LOG.info("Created ClusterProcessManager as " + implKlass); - clusterProcessMgr.init(conf); - return new MRCluster(conf, clusterProcessMgr); - } - - protected JTClient createJTClient(RemoteProcess jtDaemon) - throws IOException { - return new JTClient(getConf(), jtDaemon); - } - - protected TTClient createTTClient(RemoteProcess ttDaemon) - throws IOException { - return new TTClient(getConf(), ttDaemon); - } - - public JTClient getJTClient() { - Iterator it = getDaemons().get(Role.JT).iterator(); - return (JTClient) it.next(); - } - - public List getTTClients() { - return (List) getDaemons().get(Role.TT); - } - - public TTClient getTTClient(String hostname) { - for (TTClient c : getTTClients()) { - if (c.getHostName().equals(hostname)) { - return c; - } - } - return null; - } - - @Override - public void ensureClean() throws IOException { - //TODO: ensure that no jobs/tasks are running - //restart the cluster if cleanup fails - JTClient jtClient = getJTClient(); - JobInfo[] jobs = jtClient.getProxy().getAllJobInfo(); - for(JobInfo job : jobs) { - jtClient.killJob( - org.apache.hadoop.mapred.JobID.downgrade(job.getID())); - } - } - - @Override - protected AbstractDaemonClient createClient( - RemoteProcess process) throws IOException { - if (Role.JT.equals(process.getRole())) { - return createJTClient(process); - } else if (Role.TT.equals(process.getRole())) { - return createTTClient(process); - } else throw new IOException("Role: "+ process.getRole() + " is not " + - "applicable to MRCluster"); - } - - public static class MRProcessManager extends HadoopDaemonRemoteCluster{ - public MRProcessManager() { - super(mrDaemonInfos); - } - } - - public static class MultiMRProcessManager - extends MultiUserHadoopDaemonRemoteCluster { - public MultiMRProcessManager() { - super(mrDaemonInfos); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java deleted file mode 100644 index c1166d31741..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRDaemonClient.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.apache.hadoop.test.system.DaemonProtocol; -import org.apache.hadoop.test.system.process.RemoteProcess; - -/** - * Base class for JobTracker and TaskTracker clients. - */ -public abstract class MRDaemonClient - extends AbstractDaemonClient{ - - public MRDaemonClient(Configuration conf, RemoteProcess process) - throws IOException { - super(conf, process); - } - - public String[] getMapredLocalDirs() throws IOException { - return getProxy().getDaemonConf().getStrings(MRConfig.LOCAL_DIR); - } - - public String getLogDir() throws IOException { - return getProcessInfo().getSystemProperties().get("hadoop.log.dir"); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java deleted file mode 100644 index 5303309be24..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTClient.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import java.io.IOException; -import java.net.InetSocketAddress; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.TaskTrackerStatus; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.test.system.process.RemoteProcess; - -/** - * TaskTracker client for system tests. Assumption of the class is that the - * configuration key is set for the configuration key : {@code - * TTConfig.TT_REPORT_ADDRESS}is set, only the port portion of the - * address is used. 
- */ -public class TTClient extends MRDaemonClient { - - TTProtocol proxy; - private static final String SYSTEM_TEST_FILE = "system-test.xml"; - private static final String HADOOP_TT_OPTS_ENV = "HADOOP_TASKTRACKER_OPTS"; - - public TTClient(Configuration conf, RemoteProcess daemon) - throws IOException { - super(conf, daemon); - } - - @Override - public synchronized void connect() throws IOException { - if (isConnected()) { - return; - } - String sockAddrStr = getConf().get(TTConfig.TT_REPORT_ADDRESS); - if (sockAddrStr == null) { - throw new IllegalArgumentException( - "TaskTracker report address is not set"); - } - String[] splits = sockAddrStr.split(":"); - if (splits.length != 2) { - throw new IllegalArgumentException(TTConfig.TT_REPORT_ADDRESS - + " is not correctly configured or " - + SYSTEM_TEST_FILE + " hasn't been found."); - } - String port = splits[1]; - String sockAddr = getHostName() + ":" + port; - InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr); - proxy = (TTProtocol) RPC.getProxy(TTProtocol.class, TTProtocol.versionID, - bindAddr, getConf()); - setConnected(true); - } - - @Override - public synchronized void disconnect() throws IOException { - RPC.stopProxy(proxy); - } - - @Override - public synchronized TTProtocol getProxy() { - return proxy; - } - - /** - * Gets the last sent status to the {@link JobTracker}.
- * - * @return the task tracker status. - * @throws IOException - */ - public TaskTrackerStatus getStatus() throws IOException { - return getProxy().getStatus(); - } - - @Override - public String getHadoopOptsEnvName() { - return HADOOP_TT_OPTS_ENV; - } - - /** - * Concrete implementation of abstract super class method - * - * @param attributeName name of the attribute to be retrieved - * @return Object value of the given attribute - * @throws IOException is thrown in case of communication errors - */ - @Override - public Object getDaemonAttribute(String attributeName) throws IOException { - return getJmxAttribute("TaskTracker", "TaskTrackerInfo", attributeName); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java deleted file mode 100644 index 23c9459b9aa..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTInfo.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.TaskTracker; -import org.apache.hadoop.mapred.TaskTrackerStatus; - -/** - * TaskTracker state information as seen by the JobTracker. - */ -public interface TTInfo extends Writable { - /** - * Gets the {@link TaskTracker} name.
- * - * @return name of the tracker. - */ - String getName(); - - /** - * Gets the current status of the {@link TaskTracker}
- * - * @return status of the {@link TaskTracker} - */ - TaskTrackerStatus getStatus(); -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java deleted file mode 100644 index 58dce3a1c95..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.TaskTracker; -import org.apache.hadoop.mapred.TaskTrackerStatus; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.security.token.JobTokenSelector; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.token.TokenInfo; -import org.apache.hadoop.test.system.DaemonProtocol; - -import java.io.IOException; - -/** - * TaskTracker RPC interface to be used for cluster tests. - * - * The protocol has to be annotated so KerberosInfo can be filled in during - * creation of a ipc.Client connection - */ -@KerberosInfo( - serverPrincipal = TaskTracker.TT_USER_NAME) -@TokenInfo(JobTokenSelector.class) -public interface TTProtocol extends DaemonProtocol { - - public static final long versionID = 1L; - /** - * Gets latest status which was sent in heartbeat to the {@link JobTracker}. - *
- * - * @return status of the TaskTracker daemon - * @throws IOException in case of errors - */ - TaskTrackerStatus getStatus() throws IOException; - - /** - * Gets list of all the tasks in the {@link TaskTracker}.
- * - * @return list of all the tasks - * @throws IOException in case of errors - */ - TTTaskInfo[] getTasks() throws IOException; - - /** - * Gets the task associated with the id.
- * - * @param taskID of the task. - * - * @return task info as TTTaskInfo - * @throws IOException in case of errors - */ - TTTaskInfo getTask(TaskID taskID) throws IOException; - - /** - * Checks if any process in the process tree of the task is alive - * or not.
- * - * @param pid - * of the task attempt - * @return true if task process tree is alive. - * @throws IOException in case of errors - */ - boolean isProcessTreeAlive(String pid) throws IOException; -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java deleted file mode 100644 index f03173a5967..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.TaskStatus; -import org.apache.hadoop.mapred.TaskTracker; - -/** - * Task state information as seen by the TT. - */ -public interface TTTaskInfo extends Writable { - - /** - * Has task occupied a slot? A task occupies a slot once it starts localizing - * on the {@link TaskTracker}
- * - * @return true if task has started occupying a slot. - */ - boolean slotTaken(); - - /** - * Has the task been killed?
- * - * @return true if the task has been killed. - */ - boolean wasKilled(); - - /** - * Gets the task status associated with the particular task tracker's task - * view.
- * - * @return status of the particular task - */ - TaskStatus getTaskStatus(); - - /** - * Gets the configuration object of the task. - * @return - */ - Configuration getConf(); - - /** - * Gets the user of the task. - * @return - */ - String getUser(); - - /** - * Provides information as to whether the task is a cleanup of task. - * @return true if it is a clean up of task. - */ - boolean isTaskCleanupTask(); - - /** - * Gets the pid of the running task on the task-tracker. - * - * @return pid of the task. - */ - String getPid(); -} diff --git a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java b/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java deleted file mode 100644 index 738b5967830..00000000000 --- a/hadoop-mapreduce-project/src/test/system/java/org/apache/hadoop/mapreduce/test/system/TaskInfo.java +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.test.system; - -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.TaskStatus; -import org.apache.hadoop.mapreduce.TaskID; - -/** - * Task state information of a TaskInProgress as seen by the {@link JobTracker} - */ -public interface TaskInfo extends Writable { - /** - * Gets the task id of the TaskInProgress. - * - * @return id of the task. - */ - TaskID getTaskID(); - - /** - * Number of times task attempts have failed for the given TaskInProgress. - *
- * - * @return number of failed task attempts. - */ - int numFailedAttempts(); - - /** - * Number of times task attempts have been killed for the given TaskInProgress. - *
- * - * @return number of killed task attempts. - */ - int numKilledAttempts(); - - /** - * Gets the progress of the Task as a fraction in the range of 0.0-1.0. - *
- * - * @return progress of the task as a fraction between 0.0 and 1.0. - */ - double getProgress(); - - /** - * Number of attempts currently running for the given TaskInProgress.
- * - * @return number of running attempts. - */ - int numRunningAttempts(); - - /** - * Array of TaskStatus objects that are related to the corresponding - * TaskInProgress object. The task status of the TIP is only populated - * once a tracker reports back the task status.
- * - * @return list of task statuses. - */ - TaskStatus[] getTaskStatus(); - - /** - * Gets a list of trackers on which the task attempts are scheduled/running. - * Can be empty if the task attempt has succeeded.
- * - * @return list of trackers. - */ - String[] getTaskTrackers(); - - /** - * Checks whether the current TaskInProgress is a setup or cleanup TIP.
- * - * @return true if setup/cleanup - */ - boolean isSetupOrCleanup(); -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java deleted file mode 100644 index 413e3bb31b1..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestCluster.java +++ /dev/null @@ -1,325 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.security.PrivilegedExceptionAction; -import java.util.Collection; - -import org.junit.Assert; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.mapred.TaskStatus.State; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.mapreduce.test.system.JTClient; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.TTInfo; -import org.apache.hadoop.mapreduce.test.system.TTTaskInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.Test; - -public class TestCluster { - - private static final Log LOG = LogFactory.getLog(TestCluster.class); - - private static MRCluster cluster; - - public TestCluster() throws Exception { - - } - - @BeforeClass - public static void before() throws Exception { - String [] expExcludeList = new String[2]; - expExcludeList[0] = "java.net.ConnectException"; - expExcludeList[1] = "java.io.IOException"; - cluster = MRCluster.createCluster(new Configuration()); - cluster.setExcludeExpList(expExcludeList); - cluster.setUp(); - } - - @AfterClass - public static void after() throws Exception { - cluster.tearDown(); - } - - @Test - public void testProcessInfo() throws Exception { - LOG.info("Process info of JobTracker is : " - + cluster.getJTClient().getProcessInfo()); - Assert.assertNotNull(cluster.getJTClient().getProcessInfo()); - Collection tts = cluster.getTTClients(); - for (TTClient tt : tts) { - LOG.info("Process info of TaskTracker is : " + tt.getProcessInfo()); - 
Assert.assertNotNull(tt.getProcessInfo()); - } - } - - @Test - public void testJobSubmission() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - SleepJob job = new SleepJob(); - job.setConf(conf); - Job rJob = job.createJob(1, 1, 100, 100, 100, 100); - rJob = cluster.getJTClient().submitAndVerifyJob(rJob); - cluster.getJTClient().verifyJobHistory(rJob.getJobID()); - } - - // @Test - public void testFileStatus() throws Exception { - UserGroupInformation ugi = - UserGroupInformation.createRemoteUser(cluster - .getJTClient().getProxy().getDaemonUser()); - ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Void run() throws Exception { - MRCluster myCluster = null; - try { - myCluster = MRCluster.createCluster(cluster.getConf()); - myCluster.connect(); - JTClient jt = myCluster.getJTClient(); - String dir = "."; - checkFileStatus(jt.getFileStatus(dir, true)); - checkFileStatus(jt.listStatus(dir, false, true), dir); - for (TTClient tt : myCluster.getTTClients()) { - String[] localDirs = tt.getMapredLocalDirs(); - for (String localDir : localDirs) { - checkFileStatus(tt.listStatus(localDir, true, false), localDir); - checkFileStatus(tt.listStatus(localDir, true, true), localDir); - } - } - String systemDir = jt.getClient().getSystemDir().toString(); - checkFileStatus(jt.listStatus(systemDir, false, true), systemDir); - checkFileStatus(jt.listStatus(jt.getLogDir(), true, true), jt - .getLogDir()); - } finally { - if (myCluster != null) { - myCluster.disconnect(); - } - } - return null; - } - }); - } - - private void checkFileStatus(FileStatus[] fs, String path) { - Assert.assertNotNull(fs); - LOG.info("-----Listing for " + path + " " + fs.length); - for (FileStatus fz : fs) { - checkFileStatus(fz); - } - } - - private void checkFileStatus(FileStatus fz) { - Assert.assertNotNull(fz); - LOG.info("FileStatus is " - + fz.getPath() + " " + fz.getPermission() + " " + fz.getOwner() - + " " + fz.getGroup() + " " + fz.getClass()); - } - - /** - * Test to verify the common properties of tasks. 
- * - * @throws Exception - */ - @Test - public void testTaskDetails() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - FinishTaskControlAction.configureControlActionForJob(conf); - SleepJob job = new SleepJob(); - job.setConf(conf); - - Job rJob = job.createJob(1, 1, 100, 100, 100, 100); - JobClient client = cluster.getJTClient().getClient(); - rJob.submit(); - RunningJob rJob1 = - client.getJob(org.apache.hadoop.mapred.JobID.downgrade(rJob.getJobID())); - JobID id = rJob.getJobID(); - - JobInfo jInfo = wovenClient.getJobInfo(id); - - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - Thread.sleep(1000); - jInfo = wovenClient.getJobInfo(id); - } - - LOG.info("Waiting till job starts running one map"); - - TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id); - boolean isOneTaskStored = false; - String sometaskpid = null; - org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null; - TTClient myCli = null; - for (TaskInfo info : myTaskInfos) { - if (!info.isSetupOrCleanup()) { - String[] taskTrackers = info.getTaskTrackers(); - for (String taskTracker : taskTrackers) { - TTInfo ttInfo = wovenClient.getTTInfo(taskTracker); - TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost()); - TaskID taskId = info.getTaskID(); - TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(taskId); - Assert.assertNotNull(ttTaskInfo); - Assert.assertNotNull(ttTaskInfo.getConf()); - Assert.assertNotNull(ttTaskInfo.getUser()); - Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0); - Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0); - // Get the pid of the task attempt. The task need not have - // reported the pid of the task by the time we are checking - // the pid. So perform null check. 
- String pid = ttTaskInfo.getPid(); - int i = 1; - while (pid.isEmpty()) { - Thread.sleep(1000); - LOG.info("Waiting for task to report its pid back"); - ttTaskInfo = ttCli.getProxy().getTask(taskId); - pid = ttTaskInfo.getPid(); - if (i == 40) { - Assert.fail("The task pid not reported for 40 seconds."); - } - i++; - } - if (!isOneTaskStored) { - sometaskpid = pid; - sometaskId = ttTaskInfo.getTaskStatus().getTaskID(); - myCli = ttCli; - isOneTaskStored = true; - } - LOG.info("verified task progress to be between 0 and 1"); - State state = ttTaskInfo.getTaskStatus().getRunState(); - if (ttTaskInfo.getTaskStatus().getProgress() < 1.0 - && ttTaskInfo.getTaskStatus().getProgress() > 0.0) { - Assert.assertEquals(TaskStatus.State.RUNNING, state); - LOG.info("verified run state as " + state); - } - FinishTaskControlAction action = - new FinishTaskControlAction(org.apache.hadoop.mapred.TaskID - .downgrade(info.getTaskID())); - ttCli.getProxy().sendAction(action); - } - } - } - rJob.killJob(); - int i = 1; - while (!rJob.isComplete()) { - Thread.sleep(1000); - if (i == 40) { - Assert - .fail("The job not completed within 40 seconds after killing it."); - } - i++; - } - TTTaskInfo myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID()); - i = 0; - while (myTaskInfo != null && !myTaskInfo.getPid().isEmpty()) { - LOG.info("sleeping till task is retired from TT memory"); - Thread.sleep(1000); - myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID()); - if (i == 40) { - Assert - .fail("Task not retired from TT memory within 40 seconds of job completeing"); - } - i++; - } - Assert.assertFalse(myCli.getProxy().isProcessTreeAlive(sometaskpid)); - } - - @Test - public void testClusterRestart() throws Exception { - cluster.stop(); - // Give the cluster time to stop the whole cluster. 
- AbstractDaemonClient cli = cluster.getJTClient(); - int i = 1; - while (i < 40) { - try { - cli.ping(); - Thread.sleep(1000); - i++; - } catch (Exception e) { - break; - } - } - if (i >= 40) { - Assert.fail("JT on " + cli.getHostName() + " Should have been down."); - } - i = 1; - for (AbstractDaemonClient tcli : cluster.getTTClients()) { - i = 1; - while (i < 40) { - try { - tcli.ping(); - Thread.sleep(1000); - i++; - } catch (Exception e) { - break; - } - } - if (i >= 40) { - Assert.fail("TT on " + tcli.getHostName() + " Should have been down."); - } - } - cluster.start(); - cli = cluster.getJTClient(); - i = 1; - while (i < 40) { - try { - cli.ping(); - break; - } catch (Exception e) { - i++; - Thread.sleep(1000); - LOG.info("Waiting for Jobtracker on host : " - + cli.getHostName() + " to come up."); - } - } - if (i >= 40) { - Assert.fail("JT on " + cli.getHostName() + " Should have been up."); - } - for (AbstractDaemonClient tcli : cluster.getTTClients()) { - i = 1; - while (i < 40) { - try { - tcli.ping(); - break; - } catch (Exception e) { - i++; - Thread.sleep(1000); - LOG.info("Waiting for Tasktracker on host : " - + tcli.getHostName() + " to come up."); - } - } - if (i >= 40) { - Assert.fail("TT on " + tcli.getHostName() + " Should have been Up."); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java deleted file mode 100644 index f43c6fd9357..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestControlledJob.java +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import junit.framework.Assert; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class TestControlledJob { - private MRCluster cluster; - - private static final Log LOG = LogFactory.getLog(TestControlledJob.class); - - public TestControlledJob() throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - } - - @Before - public void before() throws Exception { - cluster.setUp(); - } - - @After - public void after() throws Exception { - cluster.tearDown(); - } - - @Test - public void testControlledJob() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - FinishTaskControlAction.configureControlActionForJob(conf); - SleepJob job = new SleepJob(); - job.setConf(conf); - - Job slpJob = job.createJob(1, 0, 100, 100, 100, 100); - slpJob.submit(); - JobClient client = cluster.getJTClient().getClient(); - - RunningJob rJob = - client.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob - .getJobID())); - JobID id = rJob.getID(); - - JobInfo jInfo = wovenClient.getJobInfo(id); - - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - Thread.sleep(1000); - jInfo = wovenClient.getJobInfo(id); - } - - LOG.info("Waiting till job starts running one map"); - jInfo = wovenClient.getJobInfo(id); - Assert.assertEquals(jInfo.runningMaps(), 1); - - LOG.info("waiting for another cycle to " - + "check if the maps dont finish off"); - Thread.sleep(1000); - jInfo = wovenClient.getJobInfo(id); - Assert.assertEquals(jInfo.runningMaps(), 1); - - TaskInfo[] taskInfos = wovenClient.getTaskInfo(id); - - for (TaskInfo info : taskInfos) { - LOG.info("constructing control action to signal task to finish"); - FinishTaskControlAction action = - new FinishTaskControlAction(TaskID.downgrade(info.getTaskID())); - for (TTClient cli : cluster.getTTClients()) { - cli.getProxy().sendAction(action); - } - } - - jInfo = wovenClient.getJobInfo(id); - int i = 1; - if (jInfo != null) { - while (!jInfo.getStatus().isJobComplete()) { - Thread.sleep(1000); - jInfo = wovenClient.getJobInfo(id); - if (jInfo == null) { - break; - } - if (i > 40) { - Assert.fail("Controlled Job with ID : " - + jInfo.getID() - + " has not completed in 40 seconds after signalling."); - } - i++; - } - } - LOG.info("Job sucessfully completed after signalling!!!!"); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java deleted file mode 100644 index 35c08d00f84..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java +++ /dev/null @@ -1,346 +0,0 @@ -/** - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.net.URI; -import java.util.Collection; -import java.util.ArrayList; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.UtilsForTests; - -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.filecache.DistributedCache; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; - -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; - -/** - * Verify the Distributed Cache functionality. This test scenario is for a - * distributed cache file behaviour when it is modified before and after being - * accessed by maximum two jobs. Once a job uses a distributed cache file that - * file is stored in the mapred.local.dir. If the next job uses the same file, - * but with differnt timestamp, then that file is stored again. So, if two jobs - * choose the same tasktracker for their job execution then, the distributed - * cache file should be found twice. - * - * This testcase runs a job with a distributed cache file. All the tasks' - * corresponding tasktracker's handle is got and checked for the presence of - * distributed cache with proper permissions in the proper directory. Next when - * job runs again and if any of its tasks hits the same tasktracker, which ran - * one of the task of the previous job, then that file should be uploaded again - * and task should not use the old file. This is verified. 
- */ - -public class TestDistributedCacheModifiedFile { - - private static MRCluster cluster = null; - private static FileSystem dfs = null; - private static FileSystem ttFs = null; - private static JobClient client = null; - private static FsPermission permission = new FsPermission((short) 00777); - - private static String uriPath = "hdfs:///tmp/test.txt"; - private static final Path URIPATH = new Path(uriPath); - private String distributedFileName = "test.txt"; - - static final Log LOG = - LogFactory.getLog(TestDistributedCacheModifiedFile.class); - - public TestDistributedCacheModifiedFile() throws Exception { - } - - @BeforeClass - public static void setUp() throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - client = cluster.getJTClient().getClient(); - dfs = client.getFs(); - // Deleting the file if it already exists - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - // Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - // Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - // Waiting for 5 seconds to make sure tasktrackers are ready - Thread.sleep(5000); - } - - @AfterClass - public static void tearDown() throws Exception { - cluster.tearDown(); - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - // Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - // Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - } - - @Test - /** - * This tests Distributed Cache for modified file - * @param none - * @return void - */ - public void testDistributedCache() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - - // This counter will check for count of a loop, - // which might become infinite. - int count = 0; - // This boolean will decide whether to run job again - boolean continueLoop = true; - // counter for job Loop - int countLoop = 0; - // This counter increases with all the tasktrackers in which tasks ran - int taskTrackerCounter = 0; - // This will store all the tasktrackers in which tasks ran - ArrayList taskTrackerCollection = new ArrayList(); - // This boolean tells if two tasks ran onteh same tasktracker or not - boolean taskTrackerFound = false; - - do { - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100); - - // Before starting, Modify the file - String input = "This will be the content of\n" + "distributed cache\n"; - // Creating the path with the file - DataOutputStream file = - UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input); - - DistributedCache.createSymlink(conf); - URI uri = URI.create(uriPath); - DistributedCache.addCacheFile(uri, conf); - JobConf jconf = new JobConf(conf); - - // Controls the job till all verification is done - FinishTaskControlAction.configureControlActionForJob(conf); - - slpJob.submit(); - // Submitting the job - RunningJob rJob = - cluster.getJTClient().getClient().getJob( - org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID())); - - // counter for job Loop - countLoop++; - - TTClient tClient = null; - JobInfo jInfo = wovenClient.getJobInfo(rJob.getID()); - LOG.info("jInfo is :" + jInfo); - - // Assert if jobInfo is null - Assert.assertNotNull("jobInfo is null", jInfo); - - // Wait for the job to start running. 
- count = 0; - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - UtilsForTests.waitFor(10000); - count++; - jInfo = wovenClient.getJobInfo(rJob.getID()); - // If the count goes beyond a point, then break; This is to avoid - // infinite loop under unforeseen circumstances. Testcase will anyway - // fail later. - if (count > 10) { - Assert.fail("job has not reached running state for more than" - + "100 seconds. Failing at this point"); - } - } - - LOG.info("job id is :" + rJob.getID().toString()); - - TaskInfo[] taskInfos = - cluster.getJTClient().getProxy().getTaskInfo(rJob.getID()); - - boolean distCacheFileIsFound; - - for (TaskInfo taskInfo : taskInfos) { - distCacheFileIsFound = false; - String[] taskTrackers = taskInfo.getTaskTrackers(); - for (String taskTracker : taskTrackers) { - // Formatting tasktracker to get just its FQDN - taskTracker = UtilsForTests.getFQDNofTT(taskTracker); - LOG.info("taskTracker is :" + taskTracker); - - // The tasktrackerFound variable is initialized - taskTrackerFound = false; - - // This will be entered from the second job onwards - if (countLoop > 1) { - if (taskTracker != null) { - continueLoop = taskTrackerCollection.contains(taskTracker); - } - if (continueLoop) { - taskTrackerFound = true; - } - } - // Collecting the tasktrackers - if (taskTracker != null) - taskTrackerCollection.add(taskTracker); - - // we have loopped through two times to look for task - // getting submitted on same tasktrackers.The same tasktracker - // for subsequent jobs was not hit maybe because of many number - // of tasktrackers. So, testcase has to stop here. - if (countLoop > 1) { - continueLoop = false; - } - - tClient = cluster.getTTClient(taskTracker); - - // tClient maybe null because the task is already dead. Ex: setup - if (tClient == null) { - continue; - } - - String[] localDirs = tClient.getMapredLocalDirs(); - int distributedFileCount = 0; - // Go to every single path - for (String localDir : localDirs) { - // Public Distributed cache will always be stored under - // mapre.local.dir/tasktracker/archive - localDir = - localDir - + Path.SEPARATOR - + TaskTracker.getPublicDistributedCacheDir(); - LOG.info("localDir is : " + localDir); - - // Get file status of all the directories - // and files under that path. - FileStatus[] fileStatuses = - tClient.listStatus(localDir, true, true); - for (FileStatus fileStatus : fileStatuses) { - Path path = fileStatus.getPath(); - LOG.info("path is :" + path.toString()); - // Checking if the received path ends with - // the distributed filename - distCacheFileIsFound = - (path.toString()).endsWith(distributedFileName); - // If file is found, check for its permission. - // Since the file is found break out of loop - if (distCacheFileIsFound) { - LOG.info("PATH found is :" + path.toString()); - distributedFileCount++; - String filename = path.getName(); - FsPermission fsPerm = fileStatus.getPermission(); - Assert.assertTrue("File Permission is not 777", fsPerm - .equals(new FsPermission("777"))); - } - } - } - - LOG.debug("The distributed FileCount is :" + distributedFileCount); - LOG.debug("The taskTrackerFound is :" + taskTrackerFound); - - // If distributed cache is modified in dfs - // between two job runs, it can be present more than once - // in any of the task tracker, in which job ran. - if (distributedFileCount != 2 && taskTrackerFound) { - Assert.fail("The distributed cache file has to be two. 
" - + "But found was " + distributedFileCount); - } else if (distributedFileCount > 1 && !taskTrackerFound) { - Assert.fail("The distributed cache file cannot more than one." - + " But found was " + distributedFileCount); - } else if (distributedFileCount < 1) - Assert.fail("The distributed cache file is less than one. " - + "But found was " + distributedFileCount); - if (!distCacheFileIsFound) { - Assert.assertEquals( - "The distributed cache file does not exist", - distCacheFileIsFound, false); - } - } - } - // Allow the job to continue through MR control job. - for (TaskInfo taskInfoRemaining : taskInfos) { - FinishTaskControlAction action = - new FinishTaskControlAction(TaskID.downgrade(taskInfoRemaining - .getTaskID())); - Collection tts = cluster.getTTClients(); - for (TTClient cli : tts) { - cli.getProxy().sendAction(action); - } - } - - // Killing the job because all the verification needed - // for this testcase is completed. - rJob.killJob(); - - // Waiting for 3 seconds for cleanup to start - Thread.sleep(3000); - - // Getting the last cleanup task's tasktracker also, as - // distributed cache gets uploaded even during cleanup. - TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID()); - if (myTaskInfos != null) { - for (TaskInfo info : myTaskInfos) { - if (info.isSetupOrCleanup()) { - String[] taskTrackers = info.getTaskTrackers(); - for (String taskTracker : taskTrackers) { - // Formatting tasktracker to get just its FQDN - taskTracker = UtilsForTests.getFQDNofTT(taskTracker); - LOG.info("taskTracker is :" + taskTracker); - // Collecting the tasktrackers - if (taskTracker != null) - taskTrackerCollection.add(taskTracker); - } - } - } - } - - // Making sure that the job is complete. - while (jInfo != null && !jInfo.getStatus().isJobComplete()) { - Thread.sleep(10000); - jInfo = wovenClient.getJobInfo(rJob.getID()); - } - - } while (continueLoop); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java deleted file mode 100644 index 5d8ff49a370..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java +++ /dev/null @@ -1,284 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.net.URI; -import java.util.Collection; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.UtilsForTests; - -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.filecache.DistributedCache; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; - -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; - -/** - * Verify the Distributed Cache functionality. - * This test scenario is for a distributed cache file behaviour - * when the file is private. Once a job uses a distributed - * cache file with private permissions that file is stored in the - * mapred.local.dir, under the directory which has the same name - * as job submitter's username. The directory has 700 permission - * and the file under it, should have 777 permissions. -*/ - -public class TestDistributedCachePrivateFile { - - private static MRCluster cluster = null; - private static FileSystem dfs = null; - private static JobClient client = null; - private static FsPermission permission = new FsPermission((short)00770); - - private static String uriPath = "hdfs:///tmp/test.txt"; - private static final Path URIPATH = new Path(uriPath); - private String distributedFileName = "test.txt"; - - static final Log LOG = LogFactory. - getLog(TestDistributedCachePrivateFile.class); - - public TestDistributedCachePrivateFile() throws Exception { - } - - @BeforeClass - public static void setUp() throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - client = cluster.getJTClient().getClient(); - dfs = client.getFs(); - //Deleting the file if it already exists - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - //Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - //Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - - String input = "This will be the content of\n" + "distributed cache\n"; - //Creating the path with the file - DataOutputStream file = - UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input); - } - - @AfterClass - public static void tearDown() throws Exception { - cluster.tearDown(); - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - //Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - //Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - } - - @Test - /** - * This tests Distributed Cache for private file - * @param none - * @return void - */ - public void testDistributedCache() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - - //This counter will check for count of a loop, - //which might become infinite. 
- int count = 0; - - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100); - - DistributedCache.createSymlink(conf); - URI uri = URI.create(uriPath); - DistributedCache.addCacheFile(uri, conf); - JobConf jconf = new JobConf(conf); - - //Controls the job till all verification is done - FinishTaskControlAction.configureControlActionForJob(conf); - - //Submitting the job - slpJob.submit(); - RunningJob rJob = - cluster.getJTClient().getClient().getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID())); - - JobStatus[] jobStatus = client.getAllJobs(); - String userName = jobStatus[0].getUsername(); - - TTClient tClient = null; - JobInfo jInfo = wovenClient.getJobInfo(rJob.getID()); - LOG.info("jInfo is :" + jInfo); - - //Assert if jobInfo is null - Assert.assertNotNull("jobInfo is null", jInfo); - - //Wait for the job to start running. - count = 0; - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - UtilsForTests.waitFor(10000); - count++; - jInfo = wovenClient.getJobInfo(rJob.getID()); - //If the count goes beyond a point, then Assert; This is to avoid - //infinite loop under unforeseen circumstances. - if (count > 10) { - Assert.fail("job has not reached running state for more than" + - "100 seconds. Failing at this point"); - } - } - - LOG.info("job id is :" + rJob.getID().toString()); - - TaskInfo[] taskInfos = cluster.getJTClient().getProxy() - .getTaskInfo(rJob.getID()); - - boolean distCacheFileIsFound; - - for (TaskInfo taskInfo : taskInfos) { - distCacheFileIsFound = false; - String[] taskTrackers = taskInfo.getTaskTrackers(); - - for(String taskTracker : taskTrackers) { - //Getting the exact FQDN of the tasktracker from - //the tasktracker string. - taskTracker = UtilsForTests.getFQDNofTT(taskTracker); - tClient = cluster.getTTClient(taskTracker); - String[] localDirs = tClient.getMapredLocalDirs(); - int distributedFileCount = 0; - String localDirOnly = null; - - boolean FileNotPresentForThisDirectoryPath = false; - - //Go to every single path - for (String localDir : localDirs) { - FileNotPresentForThisDirectoryPath = false; - localDirOnly = localDir; - - //Public Distributed cache will always be stored under - //mapred.local.dir/tasktracker/archive - localDirOnly = localDir + Path.SEPARATOR + TaskTracker.SUBDIR + - Path.SEPARATOR + userName; - - //Private Distributed cache will always be stored under - //mapre.local.dir/taskTracker//distcache - //Checking for username directory to check if it has the - //proper permissions - localDir = localDir + Path.SEPARATOR + - TaskTracker.getPrivateDistributedCacheDir(userName); - - FileStatus fileStatusMapredLocalDirUserName = null; - - try { - fileStatusMapredLocalDirUserName = tClient. - getFileStatus(localDirOnly, true); - } catch (Exception e) { - LOG.info("LocalDirOnly :" + localDirOnly + " not found"); - FileNotPresentForThisDirectoryPath = true; - } - - //File will only be stored under one of the mapred.lcoal.dir - //If other paths were hit, just continue - if (FileNotPresentForThisDirectoryPath) - continue; - - Path pathMapredLocalDirUserName = - fileStatusMapredLocalDirUserName.getPath(); - FsPermission fsPermMapredLocalDirUserName = - fileStatusMapredLocalDirUserName.getPermission(); - Assert.assertTrue("Directory Permission is not 700", - fsPermMapredLocalDirUserName.equals(new FsPermission("700"))); - - //Get file status of all the directories - //and files under that path. 
- FileStatus[] fileStatuses = tClient.listStatus(localDir, - true, true); - for (FileStatus fileStatus : fileStatuses) { - Path path = fileStatus.getPath(); - LOG.info("path is :" + path.toString()); - //Checking if the received path ends with - //the distributed filename - distCacheFileIsFound = (path.toString()). - endsWith(distributedFileName); - //If file is found, check for its permission. - //Since the file is found break out of loop - if (distCacheFileIsFound){ - LOG.info("PATH found is :" + path.toString()); - distributedFileCount++; - String filename = path.getName(); - FsPermission fsPerm = fileStatus.getPermission(); - Assert.assertTrue("File Permission is not 777", - fsPerm.equals(new FsPermission("777"))); - } - } - } - - LOG.info("Distributed File count is :" + distributedFileCount); - - if (distributedFileCount > 1) { - Assert.fail("The distributed cache file is more than one"); - } else if (distributedFileCount < 1) - Assert.fail("The distributed cache file is less than one"); - if (!distCacheFileIsFound) { - Assert.assertEquals("The distributed cache file does not exist", - distCacheFileIsFound, false); - } - } - - //Allow the job to continue through MR control job. - for (TaskInfo taskInfoRemaining : taskInfos) { - FinishTaskControlAction action = new FinishTaskControlAction(TaskID - .downgrade(taskInfoRemaining.getTaskID())); - Collection tts = cluster.getTTClients(); - for (TTClient cli : tts) { - cli.getProxy().sendAction(action); - } - } - - //Killing the job because all the verification needed - //for this testcase is completed. - rJob.killJob(); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java deleted file mode 100644 index 7a18d64c35d..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java +++ /dev/null @@ -1,305 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.net.URI; -import java.util.Collection; -import java.util.ArrayList; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.Log; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.UtilsForTests; - -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.filecache.DistributedCache; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; - -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; - -/** - * Verify the Distributed Cache functionality. This test scenario is for a - * distributed cache file behaviour when it is not modified before and after - * being accessed by maximum two jobs. Once a job uses a distributed cache file - * that file is stored in the mapred.local.dir. If the next job uses the same - * file, then that is not stored again. So, if two jobs choose the same - * tasktracker for their job execution then, the distributed cache file should - * not be found twice. - * - * This testcase runs a job with a distributed cache file. All the tasks' - * corresponding tasktracker's handle is got and checked for the presence of - * distributed cache with proper permissions in the proper directory. Next when - * job runs again and if any of its tasks hits the same tasktracker, which ran - * one of the task of the previous job, then that file should not be uploaded - * again and task use the old file. This is verified. 
- */ - -public class TestDistributedCacheUnModifiedFile { - - private static MRCluster cluster = null; - private static FileSystem dfs = null; - private static FileSystem ttFs = null; - private static JobClient client = null; - private static FsPermission permission = new FsPermission((short) 00777); - - private static String uriPath = "hdfs:///tmp/test.txt"; - private static final Path URIPATH = new Path(uriPath); - private String distributedFileName = "test.txt"; - - static final Log LOG = - LogFactory.getLog(TestDistributedCacheUnModifiedFile.class); - - public TestDistributedCacheUnModifiedFile() throws Exception { - } - - @BeforeClass - public static void setUp() throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - client = cluster.getJTClient().getClient(); - dfs = client.getFs(); - // Deleting the file if it already exists - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - // Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - // Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - - // Waiting for 5 seconds to make sure tasktrackers are ready - Thread.sleep(5000); - - String input = "This will be the content of\n" + "distributed cache\n"; - // Creating the path with the file - DataOutputStream file = - UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input); - } - - @AfterClass - public static void tearDown() throws Exception { - cluster.tearDown(); - dfs.delete(URIPATH, true); - - Collection tts = cluster.getTTClients(); - // Stopping all TTs - for (TTClient tt : tts) { - tt.kill(); - } - // Starting all TTs - for (TTClient tt : tts) { - tt.start(); - } - } - - @Test - /** - * This tests Distributed Cache for unmodified file - * @param none - * @return void - */ - public void testDistributedCache() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - - // This counter will check for count of a loop, - // which might become infinite. - int count = 0; - // This boolean will decide whether to run job again - boolean continueLoop = true; - // counter for job Loop - int countLoop = 0; - // This counter incerases with all the tasktrackers in which tasks ran - int taskTrackerCounter = 0; - // This will store all the tasktrackers in which tasks ran - ArrayList taskTrackerCollection = new ArrayList(); - - do { - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(5, 1, 1000, 1000, 100, 100); - - DistributedCache.createSymlink(conf); - URI uri = URI.create(uriPath); - DistributedCache.addCacheFile(uri, conf); - JobConf jconf = new JobConf(conf); - - // Controls the job till all verification is done - FinishTaskControlAction.configureControlActionForJob(conf); - - // Submitting the job - slpJob.submit(); - RunningJob rJob = - cluster.getJTClient().getClient().getJob( - org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID())); - - // counter for job Loop - countLoop++; - - TTClient tClient = null; - JobInfo jInfo = wovenClient.getJobInfo(rJob.getID()); - LOG.info("jInfo is :" + jInfo); - - // Assert if jobInfo is null - Assert.assertNotNull("jobInfo is null", jInfo); - - // Wait for the job to start running. 
- count = 0; - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - UtilsForTests.waitFor(10000); - count++; - jInfo = wovenClient.getJobInfo(rJob.getID()); - // If the count goes beyond a point, then break; This is to avoid - // infinite loop under unforeseen circumstances. Testcase will anyway - // fail later. - if (count > 10) { - Assert.fail("job has not reached running state for more than" - + "100 seconds. Failing at this point"); - } - } - - LOG.info("job id is :" + rJob.getID().toString()); - - TaskInfo[] taskInfos = - cluster.getJTClient().getProxy().getTaskInfo(rJob.getID()); - - boolean distCacheFileIsFound; - - for (TaskInfo taskInfo : taskInfos) { - distCacheFileIsFound = false; - String[] taskTrackers = taskInfo.getTaskTrackers(); - for (String taskTracker : taskTrackers) { - // Formatting tasktracker to get just its FQDN - taskTracker = UtilsForTests.getFQDNofTT(taskTracker); - LOG.info("taskTracker is :" + taskTracker); - - // This will be entered from the second job onwards - if (countLoop > 1) { - if (taskTracker != null) { - continueLoop = taskTrackerCollection.contains(taskTracker); - } - if (!continueLoop) { - break; - } - } - - // Collecting the tasktrackers - if (taskTracker != null) - taskTrackerCollection.add(taskTracker); - - // we have loopped through enough number of times to look for task - // getting submitted on same tasktrackers.The same tasktracker - // for subsequent jobs was not hit maybe because of many number - // of tasktrackers. So, testcase has to stop here. - if (countLoop > 2) { - continueLoop = false; - } - - tClient = cluster.getTTClient(taskTracker); - - // tClient maybe null because the task is already dead. Ex: setup - if (tClient == null) { - continue; - } - - String[] localDirs = tClient.getMapredLocalDirs(); - int distributedFileCount = 0; - // Go to every single path - for (String localDir : localDirs) { - // Public Distributed cache will always be stored under - // mapre.local.dir/tasktracker/archive - localDir = - localDir - + Path.SEPARATOR - + TaskTracker.getPublicDistributedCacheDir(); - LOG.info("localDir is : " + localDir); - - // Get file status of all the directories - // and files under that path. - FileStatus[] fileStatuses = - tClient.listStatus(localDir, true, true); - for (FileStatus fileStatus : fileStatuses) { - Path path = fileStatus.getPath(); - LOG.info("path is :" + path.toString()); - // Checking if the received path ends with - // the distributed filename - distCacheFileIsFound = - (path.toString()).endsWith(distributedFileName); - // If file is found, check for its permission. - // Since the file is found break out of loop - if (distCacheFileIsFound) { - LOG.info("PATH found is :" + path.toString()); - distributedFileCount++; - String filename = path.getName(); - FsPermission fsPerm = fileStatus.getPermission(); - Assert.assertTrue("File Permission is not 777", fsPerm - .equals(new FsPermission("777"))); - } - } - } - - // Since distributed cache is unmodified in dfs - // between two job runs, it should not be present more than once - // in any of the task tracker, in which job ran. - if (distributedFileCount > 1) { - Assert.fail("The distributed cache file is more than one"); - } else if (distributedFileCount < 1) - Assert.fail("The distributed cache file is less than one"); - if (!distCacheFileIsFound) { - Assert.assertEquals( - "The distributed cache file does not exist", - distCacheFileIsFound, false); - } - } - } - // Allow the job to continue through MR control job. 
- for (TaskInfo taskInfoRemaining : taskInfos) { - FinishTaskControlAction action = - new FinishTaskControlAction(TaskID.downgrade(taskInfoRemaining - .getTaskID())); - Collection tts = cluster.getTTClients(); - for (TTClient cli : tts) { - cli.getProxy().sendAction(action); - } - } - - // Killing the job because all the verification needed - // for this testcase is completed. - rJob.killJob(); - } while (continueLoop); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java deleted file mode 100644 index f8f2cdabeb4..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestFileOwner.java +++ /dev/null @@ -1,225 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.TTInfo; -import org.apache.hadoop.mapreduce.test.system.TTTaskInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -public class TestFileOwner { - public static MRCluster cluster; - - private StringBuffer jobIdDir = new StringBuffer(); - private JTProtocol wovenClient = null; - private static final Log LOG = LogFactory.getLog(TestFileOwner.class); - private String taskController = null; - private final FsPermission PERM_777 = new FsPermission("777"); - private final FsPermission PERM_755 = new FsPermission("755"); - private final FsPermission PERM_644 = new FsPermission("644"); - - @BeforeClass - public static void setUp() throws java.lang.Exception { - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - } - - /* - * The test is used to check the file permission of local files in - * mapred.local.dir. 
The job control is used which will make the tasks wait - * for completion until it is signaled - * - * @throws Exception in case of test errors - */ - @Test - public void testFilePermission() throws Exception { - wovenClient = cluster.getJTClient().getProxy(); - Configuration conf = new Configuration(cluster.getConf()); - FinishTaskControlAction.configureControlActionForJob(conf); - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(1, 0, 100, 100, 100, 100); - JobConf jconf = new JobConf(conf); - slpJob.submit(); - RunningJob rJob = - cluster.getJTClient().getClient().getJob( - org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID())); - taskController = conf.get(TTConfig.TT_TASK_CONTROLLER); - // get the job info so we can get the env variables from the daemon. - // Now wait for the task to be in the running state, only then the - // directories will be created - JobInfo info = wovenClient.getJobInfo(rJob.getID()); - Assert.assertNotNull("JobInfo is null", info); - JobID id = rJob.getID(); - while (info.runningMaps() != 1) { - Thread.sleep(1000); - info = wovenClient.getJobInfo(id); - } - TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id); - for (TaskInfo tInfo : myTaskInfos) { - if (!tInfo.isSetupOrCleanup()) { - String[] taskTrackers = tInfo.getTaskTrackers(); - for (String taskTracker : taskTrackers) { - TTInfo ttInfo = wovenClient.getTTInfo(taskTracker); - TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost()); - Assert.assertNotNull("TTClient instance is null", ttCli); - TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID()); - Assert.assertNotNull("TTTaskInfo is null", ttTaskInfo); - while (ttTaskInfo.getTaskStatus().getRunState() != TaskStatus.State.RUNNING) { - Thread.sleep(100); - ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID()); - } - testPermissionWithTaskController(ttCli, conf, info); - FinishTaskControlAction action = - new FinishTaskControlAction(TaskID.downgrade(tInfo.getTaskID())); - for (TTClient cli : cluster.getTTClients()) { - cli.getProxy().sendAction(action); - } - } - } - } - JobInfo jInfo = wovenClient.getJobInfo(id); - jInfo = cluster.getJTClient().getProxy().getJobInfo(id); - while (!jInfo.getStatus().isJobComplete()) { - Thread.sleep(100); - jInfo = cluster.getJTClient().getProxy().getJobInfo(id); - } - } - - private void testPermissionWithTaskController( - TTClient tClient, Configuration conf, JobInfo info) { - Assert.assertNotNull("TTclient is null", tClient); - FsPermission fsPerm = null; - String[] pathInfo = conf.getStrings(MRConfig.LOCAL_DIR); - for (int i = 0; i < pathInfo.length; i++) { - // First verify the jobid directory exists - jobIdDir = new StringBuffer(); - String userName = null; - try { - JobStatus[] jobStatus = cluster.getJTClient().getClient().getAllJobs(); - userName = jobStatus[0].getUsername(); - } catch (Exception ex) { - LOG.error("Failed to get user name"); - boolean status = false; - Assert.assertTrue("Failed to get the userName", status); - } - jobIdDir.append(pathInfo[i]).append(Path.SEPARATOR); - jobIdDir.append(TaskTracker.getLocalJobDir(userName, info - .getID().toString())); - FileStatus[] fs = null; - try { - fs = tClient.listStatus(jobIdDir.toString(), true); - } catch (Exception ex) { - LOG.error("Failed to get the jobIdDir files " + ex); - } - Assert.assertEquals("Filestatus length is zero", fs.length != 0, true); - for (FileStatus file : fs) { - try { - String filename = file.getPath().getName(); - if (filename.equals(TaskTracker.JOBFILE)) { - if 
(taskController == DefaultTaskController.class.getName()) { - fsPerm = file.getPermission(); - Assert.assertTrue("FilePermission failed for " + filename, fsPerm - .equals(PERM_777)); - } - } - if (filename.startsWith("attempt")) { - StringBuffer attemptDir = new StringBuffer(jobIdDir); - attemptDir.append(Path.SEPARATOR).append(filename); - if (tClient.getFileStatus(attemptDir.toString(), true) != null) { - FileStatus[] attemptFs = - tClient.listStatus(attemptDir.toString(), true, true); - for (FileStatus attemptfz : attemptFs) { - Assert.assertNotNull("FileStatus is null", attemptfz); - fsPerm = attemptfz.getPermission(); - Assert.assertNotNull("FsPermission is null", fsPerm); - if (taskController == DefaultTaskController.class.getName()) { - if (!attemptfz.isDir()) { - Assert.assertTrue( - "FilePermission failed for " + filename, fsPerm - .equals(PERM_777)); - } else { - Assert.assertTrue( - "FilePermission failed for " + filename, fsPerm - .equals(PERM_755)); - } - } - } - } - } - if (filename.equals(TaskTracker.TASKJARDIR)) { - StringBuffer jarsDir = new StringBuffer(jobIdDir); - jarsDir.append(Path.SEPARATOR).append(filename); - FileStatus[] jarsFs = - tClient.listStatus(jarsDir.toString(), true, true); - for (FileStatus jarsfz : jarsFs) { - Assert.assertNotNull("FileStatus is null", jarsfz); - fsPerm = jarsfz.getPermission(); - Assert.assertNotNull("File permission is null", fsPerm); - if (taskController == DefaultTaskController.class.getName()) { - if (!jarsfz.isDir()) { - if (jarsfz.getPath().getName().equals("job.jar")) { - Assert.assertTrue( - "FilePermission failed for " + filename, fsPerm - .equals(PERM_777)); - } else { - Assert.assertTrue( - "FilePermission failed for " + filename, fsPerm - .equals(PERM_644)); - } - } else { - Assert.assertTrue( - "FilePermission failed for " + filename, fsPerm - .equals(PERM_755)); - } - } - } - } - } catch (Exception ex) { - LOG.error("The exception occurred while searching for nonexsistent" - + "file, ignoring and continuing. " + ex); - } - }// for loop ends - }// for loop ends - } - - @AfterClass - public static void tearDown() throws java.lang.Exception { - cluster.tearDown(); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java deleted file mode 100644 index 92c07b2d2f5..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestJobKill.java +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.*; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.Mapper; -import org.apache.hadoop.mapreduce.Reducer; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import testjar.JobKillCommitter; - -public class TestJobKill { - private static final Log LOG = LogFactory.getLog(TestJobKill.class); - private JTProtocol wovenClient = null; - private static Path outDir = new Path("output"); - private static Path inDir = new Path("input"); - private static FileSystem fs = null; - private static MRCluster cluster; - - @BeforeClass - public static void setUp() throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - fs = inDir.getFileSystem(cluster.getJTClient().getConf()); - if(!fs.exists(inDir)){ - fs.create(inDir); - } - if (fs.exists(outDir)) { - fs.delete(outDir,true); - } - } - - @AfterClass - public static void tearDown() throws Exception { - if(fs.exists(inDir)) { - fs.delete(inDir,true); - } - if (fs.exists(outDir)) { - fs.delete(outDir,true); - } - cluster.tearDown(); - } - - /* - * The test case intention is to test the job failure due to system - * exceptions, so the exceptions are thrown intentionally and the job is - * verified for failure. At the end of the test, the verification is made - * that the success file is not present in the hdfs location. This is because - * the success file only should exist if the actual job had succeeded. 
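A compact sketch of the cleanup check described above, assuming the surrounding test supplies the FileSystem and the output Path; the helper class name is made up for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputCommitter;

public class SuccessMarkerSketch {
  // A failed job must not leave the success marker behind in its output dir.
  public static boolean successMarkerAbsent(FileSystem fs, Path outDir) throws IOException {
    Path marker = new Path(outDir, FileOutputCommitter.SUCCEEDED_FILE_NAME);
    return !fs.exists(marker);
  }
}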
- * - * @throws Exception in a case of test errors - */ - @Test - public void testSystemJobKill() throws Exception { - wovenClient = cluster.getJTClient().getProxy(); - Configuration conf = new Configuration(cluster.getConf()); - conf.set(MRJobConfig.MAP_MAX_ATTEMPTS, "1"); - conf.set(MRJobConfig.REDUCE_MAX_ATTEMPTS, "1"); - // fail the mapper job - failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobMapperFail", - JobKillCommitter.MapperFail.class, JobKillCommitter.ReducerPass.class, - false); - // fail the reducer job - failJob(conf, JobKillCommitter.CommitterWithNoError.class, - "JobReducerFail", JobKillCommitter.MapperPass.class, - JobKillCommitter.ReducerFail.class,false); - // fail the set up job - failJob(conf, JobKillCommitter.CommitterWithFailSetup.class, - "JobSetupFail", JobKillCommitter.MapperPass.class, - JobKillCommitter.ReducerPass.class,false); - // fail the clean up job - failJob(conf, JobKillCommitter.CommitterWithFailCleanup.class, - "JobCleanupFail", JobKillCommitter.MapperPass.class, - JobKillCommitter.ReducerPass.class,false); - } - - private void failJob(Configuration conf, - Class theClass, String confName, - Class mapClass, Class redClass, - boolean isUserKill) - throws Exception { - Job job = new Job(conf, confName); - job.setJarByClass(JobKillCommitter.class); - job.setMapperClass(mapClass); - job.setCombinerClass(redClass); - job.setMapOutputKeyClass(Text.class); - job.setMapOutputValueClass(Text.class); - job.setReducerClass(redClass); - job.setNumReduceTasks(1); - FileInputFormat.addInputPath(job, inDir); - FileOutputFormat.setOutputPath(job, outDir); - JobConf jconf = new JobConf(job.getConfiguration(), JobKillCommitter.class); - jconf.setOutputCommitter(theClass); - if(!isUserKill) - { - RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf); - JobID id = rJob.getID(); - JobInfo jInfo = wovenClient.getJobInfo(id); - Assert.assertTrue("Job is not in PREP state", - jInfo.getStatus().getRunState() == JobStatus.PREP); - } - else - { - //user kill job - RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf); - JobInfo info = wovenClient.getJobInfo(rJob.getID()); - Assert.assertNotNull("Job Info is null",info); - JobID id = rJob.getID(); - while (info.runningMaps() != 1) { - Thread.sleep(1000); - info = wovenClient.getJobInfo(id); - } - rJob.killJob(); - } - checkCleanup(jconf); - deleteOutputDir(); - } - - /** - * This test is used to kill the job by explicity calling the kill api - * and making sure the clean up happens - * @throws Exception - */ - @Test - public void testUserJobKill() throws Exception{ - wovenClient = cluster.getJTClient().getProxy(); - Configuration conf = new Configuration(cluster.getConf()); - conf.set(MRJobConfig.MAP_MAX_ATTEMPTS, "1"); - conf.set(MRJobConfig.REDUCE_MAX_ATTEMPTS, "1"); - // fail the mapper job - failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobUserKill", - JobKillCommitter.MapperPassSleep.class, - JobKillCommitter.ReducerPass.class,true); - } - - private void checkCleanup(JobConf conf) throws Exception { - if (outDir != null) { - if (fs.exists(outDir)) { - Path filePath = new Path(outDir, - FileOutputCommitter.SUCCEEDED_FILE_NAME); - // check to make sure the success file is not there since the job - // failed. 
- Assert.assertTrue("The success file is present when the job failed", - !fs.exists(filePath)); - } - } - } - - private void deleteOutputDir() throws Exception { - if (fs != null) { - fs.delete(outDir, true); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java deleted file mode 100644 index 293edc0984a..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestPushConfig.java +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; -import java.io.File; -import java.io.FileOutputStream; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.test.system.AbstractDaemonClient; -import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -public class TestPushConfig { - private static MRCluster cluster; - private String localConfDir = "localconf"; - private static final Log LOG = LogFactory.getLog( - TestPushConfig.class.getName()); - - @BeforeClass - public static void before() throws Exception { - String [] expExcludeList = new String[2]; - expExcludeList[0] = "java.net.ConnectException"; - expExcludeList[1] = "java.io.IOException"; - - cluster = MRCluster.createCluster(new Configuration()); - cluster.setExcludeExpList(expExcludeList); - cluster.setUp(); - } - - @AfterClass - public static void after() throws Exception { - cluster.tearDown(); - } - - /** - * This test about testing the pushConfig feature. The pushConfig functionality - * available as part of the cluster process manager. The functionality takes - * in local input directory and pushes all the files from the local to the - * remote conf directory. This functionality is required is change the config - * on the fly and restart the cluster which will be used by other test cases - * @throws Exception is thrown if pushConfig fails. 
- */ - @Test - public void testPushConfig() throws Exception { - final String DUMMY_CONFIG_STRING = "mapreduce.newdummy.conf"; - final String DUMMY_CONFIG_STRING_VALUE = "HerriotTestRules"; - Configuration origconf = new Configuration(cluster.getConf()); - origconf.set(DUMMY_CONFIG_STRING, DUMMY_CONFIG_STRING_VALUE); - String localDir = HadoopDaemonRemoteCluster.getDeployedHadoopConfDir() + - File.separator + localConfDir; - File lFile = new File(localDir); - if(!lFile.exists()){ - lFile.mkdir(); - } - String mapredConf = localDir + File.separator + "mapred-site.xml"; - File file = new File(mapredConf); - origconf.writeXml(new FileOutputStream(file)); - Configuration daemonConf = cluster.getJTClient().getProxy().getDaemonConf(); - Assert.assertTrue("Dummy varialble is expected to be null before restart.", - daemonConf.get(DUMMY_CONFIG_STRING) == null); - String newDir = cluster.getClusterManager().pushConfig(localDir); - cluster.stop(); - AbstractDaemonClient cli = cluster.getJTClient(); - waitForClusterStop(cli); - // make sure the cluster has actually stopped - cluster.getClusterManager().start(newDir); - cli = cluster.getJTClient(); - waitForClusterStart(cli); - // make sure the cluster has actually started - Configuration newconf = cluster.getJTClient().getProxy().getDaemonConf(); - Assert.assertTrue("Extra varialble is expected to be set", - newconf.get(DUMMY_CONFIG_STRING).equals(DUMMY_CONFIG_STRING_VALUE)); - cluster.getClusterManager().stop(newDir); - cli = cluster.getJTClient(); - // make sure the cluster has actually stopped - waitForClusterStop(cli); - // start the daemons with original conf dir - cluster.getClusterManager().start(); - cli = cluster.getJTClient(); - waitForClusterStart(cli); - daemonConf = cluster.getJTClient().getProxy().getDaemonConf(); - Assert.assertTrue("Dummy variable is expected to be null after restart.", - daemonConf.get(DUMMY_CONFIG_STRING) == null); - lFile.delete(); - } - - private void waitForClusterStop(AbstractDaemonClient cli) throws Exception { - int i=1; - while (i < 40) { - try { - cli.ping(); - Thread.sleep(1000); - i++; - } catch (Exception e) { - break; - } - } - for (AbstractDaemonClient tcli : cluster.getTTClients()) { - i = 1; - while (i < 40) { - try { - tcli.ping(); - Thread.sleep(1000); - i++; - } catch (Exception e) { - break; - } - } - if (i >= 40) { - Assert.fail("TT on " + tcli.getHostName() + " Should have been down."); - } - } - } - - private void waitForClusterStart(AbstractDaemonClient cli) throws Exception { - int i=1; - while (i < 40) { - try { - cli.ping(); - break; - } catch (Exception e) { - i++; - Thread.sleep(1000); - LOG.info("Waiting for Jobtracker on host : " - + cli.getHostName() + " to come up."); - } - } - for (AbstractDaemonClient tcli : cluster.getTTClients()) { - i = 1; - while (i < 40) { - try { - tcli.ping(); - break; - } catch (Exception e) { - i++; - Thread.sleep(1000); - LOG.info("Waiting for Tasktracker on host : " - + tcli.getHostName() + " to come up."); - } - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java deleted file mode 100644 index aa0e1c263c7..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestSortValidate.java +++ /dev/null @@ -1,181 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import junit.framework.Assert; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.examples.RandomWriter; -import org.apache.hadoop.examples.Sort; - -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.MRCluster; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -/** - * A System test to test the Map-Reduce framework's sort - * with a real Map-Reduce Cluster. - */ -public class TestSortValidate { - // Input/Output paths for sort - private static final Path SORT_INPUT_PATH = new Path("inputDirectory"); - private static final Path SORT_OUTPUT_PATH = new Path("outputDirectory"); - - // make it big enough to cause a spill in the map - private static final int RW_BYTES_PER_MAP = 3 * 1024 * 1024; - private static final int RW_MAPS_PER_HOST = 2; - - private MRCluster cluster = null; - private FileSystem dfs = null; - private JobClient client = null; - - private static final Log LOG = LogFactory.getLog(TestSortValidate.class); - - public TestSortValidate() - throws Exception { - cluster = MRCluster.createCluster(new Configuration()); - } - - @Before - public void setUp() throws java.lang.Exception { - cluster.setUp(); - client = cluster.getJTClient().getClient(); - - dfs = client.getFs(); - dfs.delete(SORT_INPUT_PATH, true); - dfs.delete(SORT_OUTPUT_PATH, true); - } - - @After - public void after() throws Exception { - cluster.tearDown(); - dfs.delete(SORT_INPUT_PATH, true); - dfs.delete(SORT_OUTPUT_PATH, true); - } - - public void runRandomWriter(Configuration job, Path sortInput) - throws Exception { - // Scale down the default settings for RandomWriter for the test-case - // Generates NUM_HADOOP_SLAVES * RW_MAPS_PER_HOST * RW_BYTES_PER_MAP - job.setInt("test.randomwrite.bytes_per_map", RW_BYTES_PER_MAP); - job.setInt("test.randomwriter.maps_per_host", RW_MAPS_PER_HOST); - String[] rwArgs = {sortInput.toString()}; - - runAndVerify(job,new RandomWriter(), rwArgs); - } - - private void runAndVerify(Configuration job, Tool tool, String[] args) - throws Exception { - - // This calculates the previous number fo jobs submitted before a new - // job gets submitted. 
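A small sketch of the job-detection idiom used here: sample the job count first, then poll JobClient.getAllJobs() until a new entry appears. The class name is illustrative and the caller is assumed to own a connected JobClient.

import java.io.IOException;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;

public class NewJobSketch {
  public static JobStatus waitForNewJob(JobClient client, int prevJobsNum)
      throws IOException, InterruptedException {
    JobStatus[] jobStatus = client.getAllJobs();
    while (jobStatus.length - prevJobsNum == 0) {
      Thread.sleep(1000);              // poll once a second
      jobStatus = client.getAllJobs();
    }
    return jobStatus[0];               // the most recent submission is listed first
  }
}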
- int prevJobsNum = 0; - - // JTProtocol wovenClient - JTProtocol wovenClient = cluster.getJTClient().getProxy(); - - // JobStatus - JobStatus[] jobStatus = null; - - // JobID - JobID id = null; - - // RunningJob rJob; - RunningJob rJob = null; - - // JobInfo jInfo; - JobInfo jInfo = null; - - //Getting the previous job numbers that are submitted. - jobStatus = client.getAllJobs(); - prevJobsNum = jobStatus.length; - - // Run RandomWriter - Assert.assertEquals(ToolRunner.run(job, tool, args), 0); - - //Waiting for the job to appear in the jobstatus - jobStatus = client.getAllJobs(); - - while (jobStatus.length - prevJobsNum == 0) { - LOG.info("Waiting for the job to appear in the jobStatus"); - Thread.sleep(1000); - jobStatus = client.getAllJobs(); - } - - //Getting the jobId of the just submitted job - //The just submitted job is always added in the first slot of jobstatus - id = jobStatus[0].getJobID(); - - rJob = client.getJob(id); - - jInfo = wovenClient.getJobInfo(id); - - //Making sure that the job is complete. - while (jInfo != null && !jInfo.getStatus().isJobComplete()) { - Thread.sleep(10000); - jInfo = wovenClient.getJobInfo(id); - } - - cluster.getJTClient().verifyCompletedJob(id); - } - - private void runSort(Configuration job, Path sortInput, Path sortOutput) - throws Exception { - - job.setInt("io.sort.mb", 1); - - // Setup command-line arguments to 'sort' - String[] sortArgs = {sortInput.toString(), sortOutput.toString()}; - - runAndVerify(job,new Sort(), sortArgs); - - } - - private void runSortValidator(Configuration job, - Path sortInput, Path sortOutput) - throws Exception { - String[] svArgs = {"-sortInput", sortInput.toString(), - "-sortOutput", sortOutput.toString()}; - - runAndVerify(job,new SortValidator(), svArgs); - - } - - @Test - public void testMapReduceSort() throws Exception { - // Run randomwriter to generate input for 'sort' - runRandomWriter(cluster.getConf(), SORT_INPUT_PATH); - - // Run sort - runSort(cluster.getConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH); - - // Run sort-validator to check if sort worked correctly - runSortValidator(cluster.getConf(), SORT_INPUT_PATH, - SORT_OUTPUT_PATH); - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java deleted file mode 100644 index d84f41a547b..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskKilling.java +++ /dev/null @@ -1,640 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.AfterClass; -import org.junit.Test; -import java.io.DataOutputStream; -import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.apache.hadoop.mapreduce.test.system.JTProtocol; -import org.apache.hadoop.mapreduce.test.system.JobInfo; -import org.apache.hadoop.mapreduce.test.system.TaskInfo; -import org.apache.hadoop.mapreduce.test.system.TTClient; -import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction; -import org.apache.hadoop.mapred.JobClient.NetworkedJob; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; - -/** - * A System test for verifying the status after killing the tasks at different - * conditions. - */ -public class TestTaskKilling { - private static final Log LOG = LogFactory.getLog(TestTaskKilling.class); - private static MRCluster cluster; - private static JobClient jobClient = null; - private static JTProtocol remoteJTClient = null; - - public TestTaskKilling() { - } - - @BeforeClass - public static void before() throws Exception { - Configuration conf = new Configuration(); - cluster = MRCluster.createCluster(conf); - cluster.setUp(); - jobClient = cluster.getJTClient().getClient(); - remoteJTClient = cluster.getJTClient().getProxy(); - } - - @AfterClass - public static void after() throws Exception { - cluster.tearDown(); - } - - /** - * Verifying the running job status whether it succeeds or not after failing - * some of its tasks. 
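A sketch of the bounded polling loop these tests use to wait for a submitted job to start, written against the classic RunningJob API; the one-minute budget and the class name are illustrative.

import java.io.IOException;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public class WaitRunningSketch {
  public static boolean waitUntilRunning(RunningJob rJob, int maxSeconds)
      throws IOException, InterruptedException {
    for (int i = 0; i < maxSeconds; i++) {
      if (rJob.getJobState() == JobStatus.RUNNING || rJob.isComplete()) {
        return true;                   // started (or already finished)
      }
      Thread.sleep(1000);
    }
    return false;                      // caller should fail the test on timeout
  }
}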
- * - * @throws ClassNotFoundException - */ - @Test - public void testFailedTaskJobStatus() - throws IOException, InterruptedException, ClassNotFoundException { - Configuration conf = new Configuration(cluster.getConf()); - TaskInfo taskInfo = null; - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(3, 1, 4000, 4000, 100, 100); - JobConf jobConf = new JobConf(conf); - jobConf.setMaxMapAttempts(20); - jobConf.setMaxReduceAttempts(20); - slpJob.submit(); - RunningJob runJob = - jobClient.getJob(org.apache.hadoop.mapred.JobID.downgrade(slpJob - .getJobID())); - JobID id = runJob.getID(); - JobInfo jInfo = remoteJTClient.getJobInfo(id); - int counter = 0; - while (counter < 60) { - if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) { - break; - } else { - UtilsForTests.waitFor(1000); - jInfo = remoteJTClient.getJobInfo(id); - } - counter++; - } - Assert.assertTrue("Job has not been started for 1 min.", counter != 60); - - TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id); - for (TaskInfo taskinfo : taskInfos) { - if (!taskinfo.isSetupOrCleanup()) { - taskInfo = taskinfo; - } - } - - counter = 0; - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - while (counter < 60) { - if (taskInfo.getTaskStatus().length > 0) { - if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) { - break; - } - } - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - counter++; - } - Assert.assertTrue("Task has not been started for 1 min.", counter != 60); - - NetworkedJob networkJob = new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster); - TaskID tID = TaskID.downgrade(taskInfo.getTaskID()); - TaskAttemptID taskAttID = new TaskAttemptID(tID, 0); - networkJob.killTask(taskAttID, false); - - LOG.info("Waiting till the job is completed..."); - while (!jInfo.getStatus().isJobComplete()) { - UtilsForTests.waitFor(100); - jInfo = remoteJTClient.getJobInfo(id); - } - - Assert.assertEquals( - "JobStatus", jInfo.getStatus().getRunState(), JobStatus.SUCCEEDED); - } - - /** - * Verifying whether task temporary output directory is cleaned up or not - * after killing the task. 
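A sketch of the kill step this test performs before checking the cleanup, assuming the caller already holds the RunningJob and the classic TaskID of a running map task; the class name is illustrative.

import java.io.IOException;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskID;

public class KillAttemptSketch {
  public static TaskAttemptID killFirstAttempt(RunningJob rJob, TaskID taskID)
      throws IOException {
    TaskAttemptID attempt = new TaskAttemptID(taskID, 0); // attempt number 0
    rJob.killTask(attempt, false);    // false => mark the attempt KILLED, not FAILED
    return attempt;
  }
}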
- */ - @Test - public void testDirCleanupAfterTaskKilled() - throws IOException, InterruptedException { - TaskInfo taskInfo = null; - boolean isTempFolderExists = false; - String localTaskDir = null; - TTClient ttClient = null; - TaskID tID = null; - FileStatus filesStatus[] = null; - Path inputDir = new Path("input"); - Path outputDir = new Path("output"); - Configuration conf = new Configuration(cluster.getConf()); - JobConf jconf = new JobConf(conf); - jconf.setJobName("Word Count"); - jconf.setJarByClass(WordCount.class); - jconf.setMapperClass(WordCount.MapClass.class); - jconf.setCombinerClass(WordCount.Reduce.class); - jconf.setReducerClass(WordCount.Reduce.class); - jconf.setNumMapTasks(1); - jconf.setNumReduceTasks(1); - jconf.setMaxMapAttempts(20); - jconf.setMaxReduceAttempts(20); - jconf.setOutputKeyClass(Text.class); - jconf.setOutputValueClass(IntWritable.class); - - cleanup(inputDir, conf); - cleanup(outputDir, conf); - createInput(inputDir, conf); - FileInputFormat.setInputPaths(jconf, inputDir); - FileOutputFormat.setOutputPath(jconf, outputDir); - RunningJob runJob = jobClient.submitJob(jconf); - JobID id = runJob.getID(); - JobInfo jInfo = remoteJTClient.getJobInfo(id); - int counter = 0; - while (counter < 60) { - if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) { - break; - } else { - UtilsForTests.waitFor(1000); - jInfo = remoteJTClient.getJobInfo(id); - } - counter++; - } - Assert.assertTrue("Job has not been started for 1 min.", counter != 60); - - JobStatus[] jobStatus = jobClient.getAllJobs(); - String userName = jobStatus[0].getUsername(); - TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id); - for (TaskInfo taskinfo : taskInfos) { - if (!taskinfo.isSetupOrCleanup()) { - taskInfo = taskinfo; - break; - } - } - - counter = 0; - while (counter < 30) { - if (taskInfo.getTaskStatus().length > 0) { - if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) { - break; - } - } - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - counter++; - } - Assert.assertTrue("Task has not been started for 30 sec.", counter != 30); - - tID = TaskID.downgrade(taskInfo.getTaskID()); - FinishTaskControlAction action = new FinishTaskControlAction(tID); - - String[] taskTrackers = taskInfo.getTaskTrackers(); - counter = 0; - while (counter < 30) { - if (taskTrackers.length != 0) { - break; - } - UtilsForTests.waitFor(100); - taskTrackers = taskInfo.getTaskTrackers(); - counter++; - } - - String hostName = taskTrackers[0].split("_")[1]; - hostName = hostName.split(":")[0]; - ttClient = cluster.getTTClient(hostName); - ttClient.getProxy().sendAction(action); - String localDirs[] = ttClient.getMapredLocalDirs(); - TaskAttemptID taskAttID = new TaskAttemptID(tID, 0); - for (String localDir : localDirs) { - localTaskDir = - localDir - + "/" - + TaskTracker.getLocalTaskDir(userName, id.toString(), taskAttID - .toString()); - filesStatus = ttClient.listStatus(localTaskDir, true); - if (filesStatus.length > 0) { - isTempFolderExists = true; - NetworkedJob networkJob = new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster); - networkJob.killTask(taskAttID, false); - break; - } - } - - Assert.assertTrue( - "Task Attempt directory " - + taskAttID + " has not been found while task was running.", - isTempFolderExists); - taskInfo = remoteJTClient.getTaskInfo(tID); - - counter = 0; - while (counter < 60) { - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(tID); - filesStatus = 
ttClient.listStatus(localTaskDir, true); - if (filesStatus.length == 0) { - break; - } - counter++; - } - - Assert.assertTrue( - "Task attempt temporary folder has not been cleaned.", - isTempFolderExists && filesStatus.length == 0); - counter = 0; - while (counter < 30) { - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(tID); - counter++; - } - taskInfo = remoteJTClient.getTaskInfo(tID); - Assert.assertEquals( - "Task status has not been changed to KILLED.", TaskStatus.State.KILLED, - taskInfo.getTaskStatus()[0].getRunState()); - } - - private void cleanup(Path dir, Configuration conf) throws IOException { - FileSystem fs = dir.getFileSystem(conf); - fs.delete(dir, true); - } - - private void createInput(Path inDir, Configuration conf) throws IOException { - String input = - "Hadoop is framework for data intensive distributed " - + "applications.\n" - + "Hadoop enables applications to work with thousands of nodes."; - FileSystem fs = inDir.getFileSystem(conf); - if (!fs.mkdirs(inDir)) { - throw new IOException("Failed to create the input directory:" - + inDir.toString()); - } - fs.setPermission(inDir, new FsPermission( - FsAction.ALL, FsAction.ALL, FsAction.ALL)); - DataOutputStream file = fs.create(new Path(inDir, "data.txt")); - int i = 0; - while (i < 1000 * 3000) { - file.writeBytes(input); - i++; - } - file.close(); - } - - /** - * Verifying whether task temporary output directory is cleaned up or not - * after failing the task. - */ - @Test - public void testDirCleanupAfterTaskFailed() - throws IOException, InterruptedException { - TTClient ttClient = null; - FileStatus filesStatus[] = null; - String localTaskDir = null; - TaskInfo taskInfo = null; - TaskID tID = null; - boolean isTempFolderExists = false; - Path inputDir = new Path("input"); - Path outputDir = new Path("output"); - Configuration conf = new Configuration(cluster.getConf()); - JobConf jconf = new JobConf(conf); - jconf.setJobName("Task Failed job"); - jconf.setJarByClass(UtilsForTests.class); - jconf.setMapperClass(FailedMapperClass.class); - jconf.setNumMapTasks(1); - jconf.setNumReduceTasks(0); - jconf.setMaxMapAttempts(1); - cleanup(inputDir, conf); - cleanup(outputDir, conf); - createInput(inputDir, conf); - FileInputFormat.setInputPaths(jconf, inputDir); - FileOutputFormat.setOutputPath(jconf, outputDir); - RunningJob runJob = jobClient.submitJob(jconf); - JobID id = runJob.getID(); - JobInfo jInfo = remoteJTClient.getJobInfo(id); - - int counter = 0; - while (counter < 60) { - if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) { - break; - } else { - UtilsForTests.waitFor(1000); - jInfo = remoteJTClient.getJobInfo(id); - } - counter++; - } - Assert.assertTrue("Job has not been started for 1 min.", counter != 60); - - JobStatus[] jobStatus = jobClient.getAllJobs(); - String userName = jobStatus[0].getUsername(); - TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id); - for (TaskInfo taskinfo : taskInfos) { - if (!taskinfo.isSetupOrCleanup()) { - taskInfo = taskinfo; - break; - } - } - - tID = TaskID.downgrade(taskInfo.getTaskID()); - FinishTaskControlAction action = new FinishTaskControlAction(tID); - String[] taskTrackers = taskInfo.getTaskTrackers(); - counter = 0; - while (counter < 30) { - if (taskTrackers.length != 0) { - break; - } - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - taskTrackers = taskInfo.getTaskTrackers(); - counter++; - } - Assert.assertTrue("Task tracker not found.", taskTrackers.length != 0); - String hostName = 
taskTrackers[0].split("_")[1]; - hostName = hostName.split(":")[0]; - ttClient = cluster.getTTClient(hostName); - ttClient.getProxy().sendAction(action); - - counter = 0; - while (counter < 60) { - if (taskInfo.getTaskStatus().length > 0) { - if (taskInfo.getTaskStatus()[0].getRunState() == TaskStatus.State.RUNNING) { - break; - } - } - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - counter++; - } - Assert.assertTrue("Task has not been started for 1 min.", counter != 60); - - String localDirs[] = ttClient.getMapredLocalDirs(); - TaskAttemptID taskAttID = new TaskAttemptID(tID, 0); - for (String localDir : localDirs) { - localTaskDir = - localDir - + "/" - + TaskTracker.getLocalTaskDir(userName, id.toString(), taskAttID - .toString()); - filesStatus = ttClient.listStatus(localTaskDir, true); - if (filesStatus.length > 0) { - isTempFolderExists = true; - break; - } - } - - taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID()); - Assert.assertTrue( - "Task Attempt directory " - + taskAttID + " has not been found while task was running.", - isTempFolderExists); - counter = 0; - while (counter < 30) { - UtilsForTests.waitFor(1000); - taskInfo = remoteJTClient.getTaskInfo(tID); - counter++; - } - - Assert.assertEquals("Task status has not been changed to FAILED.", taskInfo - .getTaskStatus()[0].getRunState(), TaskStatus.State.FAILED); - - filesStatus = ttClient.listStatus(localTaskDir, true); - Assert.assertTrue( - "Temporary folder has not been cleanup.", filesStatus.length == 0); - } - - public static class FailedMapperClass - implements Mapper { - public void configure(JobConf job) { - } - - public void map( - NullWritable key, NullWritable value, - OutputCollector output, Reporter reporter) - throws IOException { - int counter = 0; - while (counter < 240) { - UtilsForTests.waitFor(1000); - counter++; - } - if (counter == 240) { - throw new IOException(); - } - } - - public void close() { - } - } - - @Test - /** - * This tests verification of job killing by killing of all task - * attempts of a particular task - * @param none - * @return void - */ - public void testAllTaskAttemptKill() throws Exception { - Configuration conf = new Configuration(cluster.getConf()); - - JobStatus[] jobStatus = null; - - SleepJob job = new SleepJob(); - job.setConf(conf); - Job slpJob = job.createJob(3, 1, 40000, 1000, 100, 100); - JobConf jconf = new JobConf(conf); - - // Submitting the job - slpJob.submit(); - RunningJob rJob = - cluster.getJTClient().getClient().getJob( - org.apache.hadoop.mapred.JobID.downgrade(slpJob.getJobID())); - - int MAX_MAP_TASK_ATTEMPTS = - Integer.parseInt(jconf.get(MRJobConfig.MAP_MAX_ATTEMPTS)); - - LOG.info("MAX_MAP_TASK_ATTEMPTS is : " + MAX_MAP_TASK_ATTEMPTS); - - Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0); - - TTClient tClient = null; - TTClient[] ttClients = null; - - JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID()); - - // Assert if jobInfo is null - Assert.assertNotNull(jInfo.getStatus().getRunState()); - - // Wait for the job to start running. - while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) { - try { - Thread.sleep(10000); - } catch (InterruptedException e) { - } - ; - jInfo = remoteJTClient.getJobInfo(rJob.getID()); - } - - // Temporarily store the jobid to use it later for comparision. 
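Since these tests repeatedly convert between the two JobID types, here is a minimal sketch of the downgrade idiom; the helper class name is illustrative.

import org.apache.hadoop.mapreduce.Job;

public class JobIdSketch {
  public static org.apache.hadoop.mapred.JobID classicId(Job job) {
    // downgrade() accepts the new-API id and returns the mapred equivalent;
    // the string form is the same for both types.
    return org.apache.hadoop.mapred.JobID.downgrade(job.getJobID());
  }
}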
- JobID jobidStore = rJob.getID(); - jobidStore = JobID.downgrade(jobidStore); - LOG.info("job id is :" + jobidStore.toString()); - - TaskInfo[] taskInfos = null; - - // After making sure that the job is running, - // the test execution has to make sure that - // at least one task has started running before continuing. - boolean runningCount = false; - int count = 0; - do { - taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID()); - runningCount = false; - for (TaskInfo taskInfo : taskInfos) { - TaskStatus[] taskStatuses = taskInfo.getTaskStatus(); - if (taskStatuses.length > 0) { - LOG.info("taskStatuses[0].getRunState() is :" - + taskStatuses[0].getRunState()); - if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING) { - runningCount = true; - break; - } else { - LOG.info("Sleeping 5 seconds"); - Thread.sleep(5000); - } - } - } - count++; - // If the count goes beyond a point, then break; This is to avoid - // infinite loop under unforeseen circumstances. Testcase will anyway - // fail later. - if (count > 10) { - Assert.fail("Since the sleep count has reached beyond a point" - + "failing at this point"); - } - } while (!runningCount); - - // This whole module is about getting the task Attempt id - // of one task and killing it MAX_MAP_TASK_ATTEMPTS times, - // whenever it re-attempts to run. - String taskIdKilled = null; - for (int i = 0; i < MAX_MAP_TASK_ATTEMPTS; i++) { - taskInfos = cluster.getJTClient().getProxy().getTaskInfo(rJob.getID()); - - for (TaskInfo taskInfo : taskInfos) { - TaskAttemptID taskAttemptID; - if (!taskInfo.isSetupOrCleanup()) { - // This is the task which is going to be killed continously in - // all its task attempts.The first task is getting picked up. - TaskID taskid = TaskID.downgrade(taskInfo.getTaskID()); - LOG.info("taskid is :" + taskid); - if (i == 0) { - taskIdKilled = taskid.toString(); - taskAttemptID = new TaskAttemptID(taskid, i); - LOG.info("taskAttemptid going to be killed is : " + taskAttemptID); - (new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)).killTask( - taskAttemptID, true); - checkTaskCompletionEvent(taskAttemptID, jInfo); - break; - } else { - if (taskIdKilled.equals(taskid.toString())) { - taskAttemptID = new TaskAttemptID(taskid, i); - LOG - .info("taskAttemptid going to be killed is : " - + taskAttemptID); - (new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster)).killTask( - taskAttemptID, true); - checkTaskCompletionEvent(taskAttemptID, jInfo); - break; - } - } - } - } - } - // Making sure that the job is complete. - while (jInfo != null && !jInfo.getStatus().isJobComplete()) { - Thread.sleep(10000); - jInfo = remoteJTClient.getJobInfo(rJob.getID()); - } - - // Making sure that the correct jobstatus is got from all the jobs - jobStatus = jobClient.getAllJobs(); - JobStatus jobStatusFound = null; - for (JobStatus jobStatusTmp : jobStatus) { - if (JobID.downgrade(jobStatusTmp.getJobID()).equals(jobidStore)) { - jobStatusFound = jobStatusTmp; - LOG.info("jobStatus found is :" + jobStatusFound.getJobId().toString()); - } - } - - // Making sure that the job has FAILED - Assert.assertEquals( - "The job should have failed at this stage", JobStatus.FAILED, - jobStatusFound.getRunState()); - } - - // This method checks if task Attemptid occurs in the list - // of tasks that are completed (killed) for a job.This is - // required because after issuing a kill comamnd, the task - // has to be killed and appear in the taskCompletion event. 
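A sketch of the completion-event scan described above, assuming a RunningJob handle for the job that owns the attempt; the class name is illustrative.

import java.io.IOException;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class CompletionEventSketch {
  public static boolean attemptReported(RunningJob rJob, TaskAttemptID attempt)
      throws IOException {
    TaskCompletionEvent[] events = rJob.getTaskCompletionEvents(0); // start from the first event
    for (TaskCompletionEvent event : events) {
      if (attempt.equals(event.getTaskAttemptId())) {
        return true;                   // the kill registered as a completion event
      }
    }
    return false;
  }
}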
- // After this a new task attempt will start running in a - // matter of few seconds. - public void checkTaskCompletionEvent( - TaskAttemptID taskAttemptID, JobInfo jInfo) throws Exception { - boolean match = false; - int count = 0; - while (!match) { - TaskCompletionEvent[] taskCompletionEvents = - new JobClient.NetworkedJob(jInfo.getStatus(),jobClient.cluster) - .getTaskCompletionEvents(0); - for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) { - if ((taskCompletionEvent.getTaskAttemptId().toString()) - .equals(taskAttemptID.toString())) { - match = true; - // Sleeping for 10 seconds giving time for the next task - // attempt to run - Thread.sleep(10000); - break; - } - } - if (!match) { - LOG.info("Thread is sleeping for 10 seconds"); - Thread.sleep(10000); - count++; - } - // If the count goes beyond a point, then break; This is to avoid - // infinite loop under unforeseen circumstances.Testcase will anyway - // fail later. - if (count > 10) { - Assert.fail("Since the task attemptid is not appearing in the" - + "TaskCompletionEvent, it seems this task attempt was not killed"); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java b/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java deleted file mode 100644 index 76d658d586a..00000000000 --- a/hadoop-mapreduce-project/src/test/system/test/org/apache/hadoop/mapred/TestTaskOwner.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.util.StringTokenizer; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.mapreduce.test.system.MRCluster; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import testjar.UserNamePermission; - -public class TestTaskOwner { - private static final Log LOG = LogFactory.getLog(TestTaskOwner.class); - private static Path outDir = new Path("output"); - private static Path inDir = new Path("input"); - public static MRCluster cluster; - - // The role of this job is to write the user name to the output file - // which will be parsed - - @BeforeClass - public static void setUp() throws java.lang.Exception { - - cluster = MRCluster.createCluster(new Configuration()); - cluster.setUp(); - FileSystem fs = inDir.getFileSystem(cluster.getJTClient().getConf()); - // Make sure that all is clean in case last tearDown wasn't successful - fs.delete(outDir, true); - fs.delete(inDir, true); - - fs.create(inDir, true); - } - - @Test - public void testProcessPermission() throws Exception { - // The user will submit a job which a plain old map reduce job - // this job will output the username of the task that is running - // in the cluster and we will authenticate whether matches - // with the job that is submitted by the same user. 
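The deleted test compares the recorded user against the "user.name" system property; an equivalent check via UserGroupInformation is sketched here, with illustrative names, where recordedUser is whatever the map task wrote into its output.

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class TaskOwnerSketch {
  public static boolean ranAsSubmitter(String recordedUser) throws IOException {
    String submitter = UserGroupInformation.getCurrentUser().getShortUserName();
    return submitter.equals(recordedUser);
  }
}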
- - Configuration conf = cluster.getJTClient().getConf(); - Job job = new Job(conf, "user name check"); - - job.setJarByClass(UserNamePermission.class); - job.setMapperClass(UserNamePermission.UserNameMapper.class); - job.setCombinerClass(UserNamePermission.UserNameReducer.class); - job.setMapOutputKeyClass(Text.class); - job.setMapOutputValueClass(Text.class); - - job.setReducerClass(UserNamePermission.UserNameReducer.class); - job.setNumReduceTasks(1); - - FileInputFormat.addInputPath(job, inDir); - FileOutputFormat.setOutputPath(job, outDir); - - job.waitForCompletion(true); - - // now verify the user name that is written by the task tracker is same - // as the - // user name that was used to launch the task in the first place - FileSystem fs = outDir.getFileSystem(conf); - - Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir, - new Utils.OutputFileUtils.OutputFilesFilter())); - - for (int i = 0; i < fileList.length; ++i) { - LOG.info("File list[" + i + "]" + ": " + fileList[i]); - BufferedReader file = new BufferedReader(new InputStreamReader(fs - .open(fileList[i]))); - String line = file.readLine(); - while (line != null) { - StringTokenizer token = new StringTokenizer(line); - if (token.hasMoreTokens()) { - LOG.info("First token " + token.nextToken()); - String userName = token.nextToken(); - - LOG.info("Next token " + userName); - Assert - .assertEquals( - "The user name did not match permission violation ", - userName, System.getProperty("user.name") - .toString()); - break; - } - } - file.close(); - } - } - - @AfterClass - public static void tearDown() throws java.lang.Exception { - FileSystem fs = outDir.getFileSystem(cluster.getJTClient().getConf()); - fs.delete(outDir, true); - fs.delete(inDir, true); - cluster.tearDown(); - } -} - -
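All of the deleted Herriot tests above share the same cluster lifecycle; a minimal sketch of that lifecycle, with an illustrative class name, looks like this.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class HerriotLifecycleSketch {
  private static MRCluster cluster;

  @BeforeClass
  public static void setUp() throws Exception {
    cluster = MRCluster.createCluster(new Configuration()); // reads the system-test configuration
    cluster.setUp();                                        // connect to the deployed daemons
  }

  @AfterClass
  public static void tearDown() throws Exception {
    cluster.tearDown();                                     // release proxies and restore cluster state
  }
}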