HADOOP-8450. Remove src/test/system. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1345795 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Eli Collins 2012-06-03 22:21:06 +00:00
parent ef8c032a86
commit 8960db7605
144 changed files with 0 additions and 21180 deletions

View File

@ -1,400 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
/**
* Default DaemonProtocolAspect which provides a default implementation
* for all the common daemon methods. If a daemon requires a more specialized
* version of a method, it is the responsibility of the DaemonClient to
* introduce it in the woven classes.
*
*/
public aspect DaemonProtocolAspect {
private boolean DaemonProtocol.ready;
@SuppressWarnings("unchecked")
private HashMap<Object, List<ControlAction>> DaemonProtocol.actions =
new HashMap<Object, List<ControlAction>>();
private static final Log LOG = LogFactory.getLog(
DaemonProtocolAspect.class.getName());
private static FsPermission defaultPermission = new FsPermission(
FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
/**
* Set whether the daemon process is ready or not. A concrete daemon protocol
* should implement pointcuts to determine when the daemon is ready and use
* this setter to set the ready state.
*
* @param ready
* true if the Daemon is ready.
*/
public void DaemonProtocol.setReady(boolean ready) {
this.ready = ready;
}
/**
* Checks if the daemon process is alive or not.
*
* @throws IOException
* if daemon is not alive.
*/
public void DaemonProtocol.ping() throws IOException {
}
/**
* Checks if the daemon process is ready to accept RPC connections after it
* finishes initialization. <br/>
*
* @return true if ready to accept connection.
*
* @throws IOException
*/
public boolean DaemonProtocol.isReady() throws IOException {
return ready;
}
/**
* Returns the process related information regarding the daemon process. <br/>
*
* @return process information.
* @throws IOException
*/
public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException {
int activeThreadCount = Thread.activeCount();
long currentTime = System.currentTimeMillis();
long maxmem = Runtime.getRuntime().maxMemory();
long freemem = Runtime.getRuntime().freeMemory();
long totalmem = Runtime.getRuntime().totalMemory();
Map<String, String> envMap = System.getenv();
Properties sysProps = System.getProperties();
Map<String, String> props = new HashMap<String, String>();
for (Map.Entry entry : sysProps.entrySet()) {
props.put((String) entry.getKey(), (String) entry.getValue());
}
ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime,
freemem, maxmem, totalmem, envMap, props);
return info;
}
public void DaemonProtocol.enable(List<Enum<?>> faults) throws IOException {
}
public void DaemonProtocol.disableAll() throws IOException {
}
public abstract Configuration DaemonProtocol.getDaemonConf()
throws IOException;
public FileStatus DaemonProtocol.getFileStatus(String path, boolean local)
throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
p = p.makeQualified(fs);
FileStatus fileStatus = fs.getFileStatus(p);
return cloneFileStatus(fileStatus);
}
/**
* Create a file with given permissions in a file system.
* @param path - source path where the file has to be created.
* @param fileName - file name.
* @param permission - file permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void DaemonProtocol.createFile(String path, String fileName,
FsPermission permission, boolean local) throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
Path filePath = new Path(path, fileName);
fs.create(filePath).close();
if (permission == null) {
fs.setPermission(filePath, defaultPermission);
} else {
fs.setPermission(filePath, permission);
}
fs.close();
}
/**
* Create a folder with given permissions in a file system.
* @param path - source path where the folder has to be created.
* @param folderName - folder name.
* @param permission - folder permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void DaemonProtocol.createFolder(String path, String folderName,
FsPermission permission, boolean local) throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
Path folderPath = new Path(path, folderName);
fs.mkdirs(folderPath);
if (permission == null) {
fs.setPermission(folderPath, defaultPermission);
} else {
fs.setPermission(folderPath, permission);
}
fs.close();
}
public FileStatus[] DaemonProtocol.listStatus(String path, boolean local)
throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
FileStatus[] status = fs.listStatus(p);
if (status != null) {
FileStatus[] result = new FileStatus[status.length];
int i = 0;
for (FileStatus fileStatus : status) {
result[i++] = cloneFileStatus(fileStatus);
}
return result;
}
return status;
}
/**
* FileStatus object may not be serializable. Clone it into raw FileStatus
* object.
*/
private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) {
return new FileStatus(fileStatus.getLen(),
fileStatus.isDir(),
fileStatus.getReplication(),
fileStatus.getBlockSize(),
fileStatus.getModificationTime(),
fileStatus.getAccessTime(),
fileStatus.getPermission(),
fileStatus.getOwner(),
fileStatus.getGroup(),
fileStatus.getPath());
}
private FileSystem DaemonProtocol.getFS(final Path path, final boolean local)
throws IOException {
FileSystem ret = null;
try {
ret = UserGroupInformation.getLoginUser().doAs (
new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
FileSystem fs = null;
if (local) {
fs = FileSystem.getLocal(getDaemonConf());
} else {
fs = path.getFileSystem(getDaemonConf());
}
return fs;
}
});
} catch (InterruptedException ie) {
}
return ret;
}
@SuppressWarnings("unchecked")
public ControlAction[] DaemonProtocol.getActions(Writable key)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(key);
if(actionList == null) {
return new ControlAction[0];
} else {
return (ControlAction[]) actionList.toArray(new ControlAction[actionList
.size()]);
}
}
}
@SuppressWarnings("unchecked")
public void DaemonProtocol.sendAction(ControlAction action)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
actionList = new ArrayList<ControlAction>();
actions.put(action.getTarget(), actionList);
}
actionList.add(action);
}
}
@SuppressWarnings("unchecked")
public boolean DaemonProtocol.isActionPending(ControlAction action)
throws IOException{
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
return false;
} else {
return actionList.contains(action);
}
}
}
@SuppressWarnings("unchecked")
public void DaemonProtocol.removeAction(ControlAction action)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
return;
} else {
actionList.remove(action);
}
}
}
public void DaemonProtocol.clearActions() throws IOException {
synchronized (actions) {
actions.clear();
}
}
public String DaemonProtocol.getFilePattern() {
//We use the hadoop.log.file system property to get the
//pattern to use in the search.
String logDir = System.getProperty("hadoop.log.dir");
String daemonLogPattern = System.getProperty("hadoop.log.file");
if (daemonLogPattern == null || daemonLogPattern.isEmpty()) {
return "*";
}
return logDir+File.separator+daemonLogPattern+"*";
}
public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
String[] list) throws IOException {
StringBuffer filePattern = new StringBuffer(getFilePattern());
String[] cmd = null;
if (list != null) {
StringBuffer filterExpPattern = new StringBuffer();
int index=0;
for (String excludeExp : list) {
if (index++ < list.length -1) {
filterExpPattern.append("grep -v " + excludeExp + " | ");
} else {
filterExpPattern.append("grep -v " + excludeExp + " | wc -l");
}
}
cmd = new String[] {
"bash",
"-c",
"grep "
+ pattern + " " + filePattern + " | "
+ filterExpPattern};
} else {
cmd = new String[] {
"bash",
"-c",
"grep -c "
+ pattern + " " + filePattern
+ " | awk -F: '{s+=$2} END {print s}'" };
}
ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
shexec.execute();
String output = shexec.getOutput();
return Integer.parseInt(output.replaceAll("\n", "").trim());
}
/**
* This method is used for suspending the process.
* @param pid process id
* @throws IOException if an I/O error occurs.
* @return true if process is suspended otherwise false.
*/
public boolean DaemonProtocol.suspendProcess(String pid) throws IOException {
String suspendCmd = getDaemonConf().get("test.system.hdrc.suspend.cmd",
"kill -SIGSTOP");
String [] command = {"bash", "-c", suspendCmd + " " + pid};
ShellCommandExecutor shexec = new ShellCommandExecutor(command);
try {
shexec.execute();
} catch (Shell.ExitCodeException e) {
LOG.warn("suspended process throws an exitcode "
+ "exception for not being suspended the given process id.");
return false;
}
LOG.info("The suspend process command is :"
+ shexec.toString()
+ " and the output for the command is "
+ shexec.getOutput());
return true;
}
/**
* This method is used for resuming a suspended process.
* @param pid process id of the suspended process.
* @throws IOException if an I/O error occurs.
* @return true if the suspended process is resumed, otherwise false.
*/
public boolean DaemonProtocol.resumeProcess(String pid) throws IOException {
String resumeCmd = getDaemonConf().get("test.system.hdrc.resume.cmd",
"kill -SIGCONT");
String [] command = {"bash", "-c", resumeCmd + " " + pid};
ShellCommandExecutor shexec = new ShellCommandExecutor(command);
try {
shexec.execute();
} catch(Shell.ExitCodeException e) {
LOG.warn("Resume process throws an exitcode "
+ "exception for not being resumed the given process id.");
return false;
}
LOG.info("The resume process command is :"
+ shexec.toString()
+ " and the output for the command is "
+ shexec.getOutput());
return true;
}
private String DaemonProtocol.user = null;
public String DaemonProtocol.getDaemonUser() {
return user;
}
public void DaemonProtocol.setUser(String user) {
this.user = user;
}
}
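
For context, the sketch below (hypothetical, not part of the removed sources) shows how a Herriot system test might drive the DaemonProtocol methods woven above once it holds a proxy obtained from a daemon client; the class name, the /tmp path and the HADOOP_OPTS lookup are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.test.system.DaemonProtocol;

public class DaemonProtocolUsageSketch {
  public static void checkDaemon(DaemonProtocol proxy) throws IOException {
    if (!proxy.isReady()) {     // ready flag is set by the concrete daemon's pointcuts
      return;
    }
    proxy.ping();               // throws IOException if the daemon is not alive
    // Environment of the remote daemon process, via getProcessInfo().
    String hadoopOpts = proxy.getProcessInfo().getEnv().get("HADOOP_OPTS");
    System.out.println("Remote HADOOP_OPTS: " + hadoopOpts);
    // Count FATAL statements in the daemon log files (no exclusion patterns).
    int fatal = proxy.getNumberOfMatchesInLogFile("FATAL", null);
    System.out.println("FATAL statements in logs: " + fatal);
    // Stat a path on the daemon's local file system.
    FileStatus status = proxy.getFileStatus("/tmp", true);
    System.out.println("Owner of /tmp: " + status.getOwner());
  }
}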

View File

@ -1,41 +0,0 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
OBJS=main.o runAs.o
CC=@CC@
CFLAGS = @CFLAGS@
BINARY=runAs
installdir = @prefix@
all: $(OBJS)
$(CC) $(CFLAGS) -o $(BINARY) $(OBJS)
main.o: runAs.o main.c
$(CC) $(CFLAGS) -o main.o -c main.c
runAs.o: runAs.h runAs.c
$(CC) $(CFLAGS) -o runAs.o -c runAs.c
clean:
rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
install: all
cp $(BINARY) $(installdir)
uninstall:
rm -rf $(installdir)/$(BINARY)
rm -rf $(BINARY)

File diff suppressed because it is too large

View File

@ -1,65 +0,0 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
AC_PREREQ(2.59)
AC_INIT([runAs],[0.1])
#changing default prefix value to empty string, so that the binary does not
#get installed within the system
AC_PREFIX_DEFAULT(.)
#add new arguments --with-home
AC_ARG_WITH(home,[--with-home path to hadoop home dir])
AC_CONFIG_SRCDIR([main.c])
AC_CONFIG_HEADER([runAs.h])
# Checks for programs.
AC_PROG_CC
# Checks for libraries.
# Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
#check for HADOOP_PREFIX
if test "$with_home" != ""
then
AC_DEFINE_UNQUOTED(HADOOP_PREFIX,"$with_home")
fi
# Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
AC_TYPE_PID_T
AC_TYPE_MODE_T
AC_TYPE_SIZE_T
# Checks for library functions.
AC_FUNC_MALLOC
AC_FUNC_REALLOC
AC_FUNC_CHOWN
AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
AC_HEADER_STDBOOL
AC_PROG_MAKE_SET

View File

@ -1,59 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runAs.h"
/**
* The binary accepts a command line of the following format:
* cluster-controller user hostname hadoop-daemon.sh-command
*/
int main(int argc, char **argv) {
int errorcode;
char *user;
char *hostname;
char *command;
struct passwd user_detail;
int i = 1;
/*
* Minimum number of arguments required for the binary to perform.
*/
if (argc < 4) {
fprintf(stderr, "Invalid number of arguments passed to the binary\n");
return INVALID_ARGUMENT_NUMER;
}
user = argv[1];
if (user == NULL) {
fprintf(stderr, "Invalid user name\n");
return INVALID_USER_NAME;
}
if (getuserdetail(user, &user_detail) != 0) {
fprintf(stderr, "Invalid user name\n");
return INVALID_USER_NAME;
}
if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
fprintf(stderr, "Cannot run tasks as super user\n");
return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
}
hostname = argv[2];
command = argv[3];
return process_controller_command(user, hostname, command);
}

View File

@ -1,111 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runAs.h"
/*
* Function to get the user details populated given a user name.
*/
int getuserdetail(char *user, struct passwd *user_detail) {
struct passwd *tempPwdPtr;
int size = sysconf(_SC_GETPW_R_SIZE_MAX);
char pwdbuffer[size];
if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) {
fprintf(stderr, "Invalid user provided to getpwnam\n");
return -1;
}
return 0;
}
/**
* Function to switch the user identity and set the appropriate
* group control as the user specified in the argument.
*/
int switchuser(char *user) {
//populate the user details
struct passwd user_detail;
if ((getuserdetail(user, &user_detail)) != 0) {
return INVALID_USER_NAME;
}
//set the right supplementary groups for the user.
if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
fprintf(stderr, "Init groups call for the user : %s failed\n",
user_detail.pw_name);
return INITGROUPS_FAILED;
}
errno = 0;
//switch the group.
setgid(user_detail.pw_gid);
if (errno != 0) {
fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
errno = 0;
//switch the user
setuid(user_detail.pw_uid);
if (errno != 0) {
fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
errno = 0;
//set the effective user id.
seteuid(user_detail.pw_uid);
if (errno != 0) {
fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
return 0;
}
/*
* Top level method which processes a cluster management
* command.
*/
int process_cluster_command(char * user, char * node , char *command) {
char *finalcommandstr;
int len;
int errorcode = 0;
if (strncmp(command, "", strlen(command)) == 0) {
fprintf(stderr, "Invalid command passed\n");
return INVALID_COMMAND_PASSED;
}
len = STRLEN + strlen(command);
finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_PREFIX,
command);
finalcommandstr[len] = '\0';
errorcode = switchuser(user);
if (errorcode != 0) {
fprintf(stderr, "switch user failed\n");
return errorcode;
}
errno = 0;
execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
if (errno != 0) {
fprintf(stderr, "Excelp failed dude to : %s\n", strerror(errno));
}
return 0;
}
/*
* Processes a cluster controller command; this is the API exposed to
* main in order to execute the cluster commands.
*/
int process_controller_command(char *user, char * node, char *command) {
return process_cluster_command(user, node, command);
}

View File

@ -1,59 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
#include <assert.h>
#include <getopt.h>
#include <grp.h>
/*
* List of possible error codes.
*/
enum errorcodes {
INVALID_ARGUMENT_NUMER = 1,
INVALID_USER_NAME, //2
SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
INITGROUPS_FAILED, //4
SETUID_OPER_FAILED, //5
INVALID_COMMAND_PASSED, //6
};
#undef HADOOP_PREFIX
#define SSH_COMMAND "ssh"
#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
#define STRLEN strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_PREFIX)
/*
* Function to get the user details populated given a user name.
*/
int getuserdetail(char *user, struct passwd *user_detail);
/*
* Processes a cluster controller command; this is the API exposed to
* main in order to execute the cluster commands.
*/
int process_controller_command(char *user, char *node, char *command);

View File

@ -1,68 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration>
<!--
These are Herriot-specific protocols. This section shouldn't be present in
a production cluster configuration. This file needs to be linked up to the
main conf/hadoop-policy.xml in the deployment process.
-->
<property>
<name>security.daemon.protocol.acl</name>
<value>*</value>
<description>ACL for DaemonProtocol, extended by all other
Herriot RPC protocols.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.nn.protocol.acl</name>
<value>*</value>
<description>ACL for NNProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
NameNode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.dn.protocol.acl</name>
<value>*</value>
<description>ACL for DNProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
DataNode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.tt.protocol.acl</name>
<value>*</value>
<description>ACL for TTProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
TaskTracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>

View File

@ -1,599 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.IOException;
import java.util.*;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.system.process.RemoteProcess;
import javax.management.*;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
/**
* Abstract class which encapsulates the DaemonClient that is used in the
* system tests.<br/>
*
* @param <PROXY> the proxy implementation of a specific Daemon
*/
public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
private Configuration conf;
private Boolean jmxEnabled = null;
private MBeanServerConnection connection;
private int jmxPortNumber = -1;
private RemoteProcess process;
private boolean connected;
private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
private static final String HADOOP_JMX_DOMAIN = "Hadoop";
private static final String HADOOP_OPTS_ENV = "HADOOP_OPTS";
/**
* Create a Daemon client.<br/>
*
* @param conf configuration to be used by the proxy to connect to the Daemon.
* @param process the remote process handle used to manage the particular daemon.
*
* @throws IOException on RPC error
*/
public AbstractDaemonClient(Configuration conf, RemoteProcess process)
throws IOException {
this.conf = conf;
this.process = process;
}
/**
* Gets if the client is connected to the Daemon <br/>
*
* @return true if connected.
*/
public boolean isConnected() {
return connected;
}
protected void setConnected(boolean connected) {
this.connected = connected;
}
/**
* Create an RPC proxy to the daemon <br/>
*
* @throws IOException on RPC error
*/
public abstract void connect() throws IOException;
/**
* Disconnect the underlying RPC proxy to the daemon.<br/>
* @throws IOException in case of communication errors
*/
public abstract void disconnect() throws IOException;
/**
* Get the proxy to connect to a particular service Daemon.<br/>
*
* @return proxy to connect to a particular service Daemon.
*/
protected abstract PROXY getProxy();
/**
* Gets the daemon level configuration.<br/>
*
* @return configuration using which daemon is running
*/
public Configuration getConf() {
return conf;
}
/**
* Gets the host on which Daemon is currently running. <br/>
*
* @return hostname
*/
public String getHostName() {
return process.getHostName();
}
/**
* Gets if the Daemon is ready to accept RPC connections. <br/>
*
* @return true if daemon is ready.
* @throws IOException on RPC error
*/
public boolean isReady() throws IOException {
return getProxy().isReady();
}
/**
* Kills the Daemon process <br/>
* @throws IOException on RPC error
*/
public void kill() throws IOException {
process.kill();
}
/**
* Checks if the Daemon process is alive or not <br/>
* @throws IOException on RPC error
*/
public void ping() throws IOException {
getProxy().ping();
}
/**
* Start up the Daemon process. <br/>
* @throws IOException on RPC error
*/
public void start() throws IOException {
process.start();
}
/**
* Get system level view of the Daemon process.
*
* @return returns system level view of the Daemon process.
*
* @throws IOException on RPC error.
*/
public ProcessInfo getProcessInfo() throws IOException {
return getProxy().getProcessInfo();
}
/**
* Abstract method to retrieve the name of a daemon specific env. var
* @return name of the Hadoop environment variable containing the daemon options
*/
abstract public String getHadoopOptsEnvName ();
/**
* Checks remote daemon process info to see if certain JMX sys. properties
* are available and determine whether the JMX service is enabled on the remote side.
*
* @return <code>boolean</code> indicating availability of remote JMX
* @throws IOException is thrown in case of communication errors
*/
public boolean isJmxEnabled() throws IOException {
return isJmxEnabled(HADOOP_OPTS_ENV) ||
isJmxEnabled(getHadoopOptsEnvName());
}
/**
* Checks remote daemon process info to see if certain JMX sys. properties
* are available and determine whether the JMX service is enabled on the remote side.
*
* @param envivar name of an environment variable to be searched
* @return <code>boolean</code> indicating availability of remote JMX
* @throws IOException is thrown in case of communication errors
*/
protected boolean isJmxEnabled(String envivar) throws IOException {
if (jmxEnabled != null) return jmxEnabled;
boolean ret = false;
String jmxRemoteString = "-Dcom.sun.management.jmxremote";
String hadoopOpts = getProcessInfo().getEnv().get(envivar);
LOG.debug("Looking into " + hadoopOpts + " from " + envivar);
List<String> options = Arrays.asList(hadoopOpts.split(" "));
ret = options.contains(jmxRemoteString);
jmxEnabled = ret;
return ret;
}
/**
* Checks remote daemon process info to find remote JMX server port number
* By default this method will look into "HADOOP_OPTS" variable only.
* @return port number of the remote JMX server or -1 if it can't be found
* @throws IOException is thrown in case of communication errors
* @throws IllegalArgumentException if non-integer port is set
* in the remote process info
*/
public int getJmxPortNumber() throws IOException, IllegalArgumentException {
int portNo = getJmxPortNumber(HADOOP_OPTS_ENV);
return portNo != -1 ? portNo : getJmxPortNumber(getHadoopOptsEnvName());
}
/**
* Checks remote daemon process info to find remote JMX server port number
*
* @param envivar name of the env. var. to look for JMX specific settings
* @return port number of the remote JMX server or -1 if it can't be found
* @throws IOException is thrown in case of communication errors
* @throws IllegalArgumentException if non-integer port is set
* in the remote process info
*/
protected int getJmxPortNumber(final String envivar) throws
IOException, IllegalArgumentException {
if (jmxPortNumber != -1) return jmxPortNumber;
String jmxPortString = "-Dcom.sun.management.jmxremote.port";
String hadoopOpts = getProcessInfo().getEnv().get(envivar);
int portNumber = -1;
boolean found = false;
String[] options = hadoopOpts.split(" ");
for (String option : options) {
if (option.startsWith(jmxPortString)) {
found = true;
try {
portNumber = Integer.parseInt(option.split("=")[1]);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("JMX port number isn't integer");
}
break;
}
}
if (!found)
throw new IllegalArgumentException("Can't detect JMX port number");
jmxPortNumber = portNumber;
return jmxPortNumber;
}
/**
* Return a file status object that represents the path.
* @param path
* given path
* @param local
* whether the path is local or not
* @return a FileStatus object
* @throws IOException see specific implementation
*/
public FileStatus getFileStatus(String path, boolean local) throws IOException {
return getProxy().getFileStatus(path, local);
}
/**
* Create a file with full permissions in a file system.
* @param path - source path where the file has to be created.
* @param fileName - file name
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void createFile(String path, String fileName,
boolean local) throws IOException {
getProxy().createFile(path, fileName, null, local);
}
/**
* Create a file with given permissions in a file system.
* @param path - source path where the file has to be created.
* @param fileName - file name.
* @param permission - file permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void createFile(String path, String fileName,
FsPermission permission, boolean local) throws IOException {
getProxy().createFile(path, fileName, permission, local);
}
/**
* Create a folder with default permissions in a file system.
* @param path - source path where the folder has to be created.
* @param folderName - folder name.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void createFolder(String path, String folderName,
boolean local) throws IOException {
getProxy().createFolder(path, folderName, null, local);
}
/**
* Create a folder with given permissions in a file system.
* @param path - source path where the folder has to be created.
* @param folderName - folder name.
* @param permission - folder permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void createFolder(String path, String folderName,
FsPermission permission, boolean local) throws IOException {
getProxy().createFolder(path, folderName, permission, local);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @return the statuses of the files/directories in the given path
* @throws IOException on RPC error.
*/
public FileStatus[] listStatus(String path, boolean local)
throws IOException {
return getProxy().listStatus(path, local);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory, recursively or non-recursively depending on the parameters.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @param recursive
* whether to recursively get the status
* @return the statuses of the files/directories in the given path
* @throws IOException is thrown on RPC error.
*/
public FileStatus[] listStatus(String path, boolean local, boolean recursive)
throws IOException {
List<FileStatus> status = new ArrayList<FileStatus>();
addStatus(status, path, local, recursive);
return status.toArray(new FileStatus[0]);
}
private void addStatus(List<FileStatus> status, String f,
boolean local, boolean recursive)
throws IOException {
FileStatus[] fs = listStatus(f, local);
if (fs != null) {
for (FileStatus fileStatus : fs) {
if (!f.equals(fileStatus.getPath().toString())) {
status.add(fileStatus);
if (recursive) {
addStatus(status, fileStatus.getPath().toString(), local, recursive);
}
}
}
}
}
/**
* Gets number of times FATAL log messages were logged in Daemon logs.
* <br/>
* Pattern used for searching is FATAL. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of fatal message.
* @throws IOException in case of communication errors
*/
public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "FATAL";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets number of times ERROR log messages were logged in Daemon logs.
* <br/>
* Pattern used for searching is ERROR. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of error message.
* @throws IOException is thrown on RPC error.
*/
public int getNumberOfErrorStatementsInLog(String[] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "ERROR";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets number of times warning log messages were logged in Daemon logs.
* <br/>
* Pattern used for searching is WARN. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of warning message.
* @throws IOException thrown on RPC error.
*/
public int getNumberOfWarnStatementsInLog(String[] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "WARN";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets number of times a given Exception was present in the log file. <br/>
*
* @param e exception class.
* @param excludeExpList list of exceptions to exclude.
* @return number of exceptions in log
* @throws IOException is thrown on RPC error.
*/
public int getNumberOfExceptionsInLog(Exception e,
String[] excludeExpList) throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = e.getClass().getSimpleName();
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Number of times ConcurrentModificationException is present in the log file.
* <br/>
* @param excludeExpList list of exceptions to exclude.
* @return number of times the exception appears in the log file.
* @throws IOException is thrown on RPC error.
*/
public int getNumberOfConcurrentModificationExceptionsInLog(
String[] excludeExpList) throws IOException {
return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
excludeExpList);
}
private int errorCount;
private int fatalCount;
private int concurrentExceptionCount;
/**
* Populate the initial exception counts, which are used to assert, once a
* testcase is done, that no new exception was logged in the daemon while it ran.
* @param excludeExpList list of exceptions to exclude
* @throws IOException is thrown on RPC error.
*/
protected void populateExceptionCount(String [] excludeExpList)
throws IOException {
errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
LOG.info("Number of error messages in logs : " + errorCount);
fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
LOG.info("Number of fatal statement in logs : " + fatalCount);
concurrentExceptionCount =
getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
LOG.info("Number of concurrent modification in logs : "
+ concurrentExceptionCount);
}
/**
* Assert that no new exceptions were logged into the log file.
* <br/>
* <b><i>
* Pre-req for the method is that populateExceptionCount() has
* to be called before calling this method.</i></b>
* @param excludeExpList list of exceptions to exclude
* @throws IOException is thrown on RPC error.
*/
protected void assertNoExceptionsOccurred(String [] excludeExpList)
throws IOException {
int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
LOG.info("Number of error messages while asserting :" + newerrorCount);
int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
LOG.info("Number of fatal messages while asserting : " + newfatalCount);
int newconcurrentExceptionCount =
getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
LOG.info("Number of concurrentmodification exception while asserting :"
+ newconcurrentExceptionCount);
Assert.assertEquals(
"New Error Messages logged in the log file", errorCount, newerrorCount);
Assert.assertEquals(
"New Fatal messages logged in the log file", fatalCount, newfatalCount);
Assert.assertEquals(
"New ConcurrentModificationException in log file",
concurrentExceptionCount, newconcurrentExceptionCount);
}
/**
* Builds the correct JMX object name from the given domain, service name and type
* @param domain JMX domain name
* @param serviceName name of the service where the MBean is registered (e.g. NameNode)
* @param typeName name of the MXBean class
* @return ObjectName for the requested MXBean or <code>null</code> if one wasn't
* found
* @throws java.io.IOException if the object name is malformed
*/
protected ObjectName getJmxBeanName(String domain, String serviceName,
String typeName) throws IOException {
if (domain == null)
domain = HADOOP_JMX_DOMAIN;
ObjectName jmxBean;
try {
jmxBean = new ObjectName(domain + ":service=" + serviceName +
",name=" + typeName);
} catch (MalformedObjectNameException e) {
LOG.debug(e.getStackTrace());
throw new IOException(e);
}
return jmxBean;
}
/**
* Create connection with the remote JMX server at given host and port
* @param host name of the remote JMX server host
* @param port port number of the remote JMX server host
* @return instance of MBeanServerConnection or <code>null</code> if one
* hasn't been established
* @throws IOException in case of communication errors
*/
protected MBeanServerConnection establishJmxConnection(String host, int port)
throws IOException {
if (connection != null) return connection;
String urlPattern = null;
try {
urlPattern = "service:jmx:rmi:///jndi/rmi://" +
host + ":" + port +
"/jmxrmi";
JMXServiceURL url = new JMXServiceURL(urlPattern);
JMXConnector connector = JMXConnectorFactory.connect(url, null);
connection = connector.getMBeanServerConnection();
} catch (java.net.MalformedURLException badURLExc) {
LOG.debug("bad url: " + urlPattern, badURLExc);
throw new IOException(badURLExc);
}
return connection;
}
Hashtable<String, ObjectName> jmxObjectNames =
new Hashtable<String, ObjectName>();
/**
* Method implements all logic for retrieving a bean's attribute.
* If any initializations, such as establishing bean server connections,
* are needed, it will perform them.
* @param serviceName name of the service where the MBean is registered (e.g. NameNode)
* @param type name of the MXBean class
* @param attributeName name of the attribute to be retrieved
* @return Object value of the attribute or <code>null</code> if not found
* @throws IOException is thrown in case of any errors
*/
protected Object getJmxAttribute (String serviceName,
String type,
String attributeName)
throws IOException {
Object retAttribute = null;
String domain = null;
if (isJmxEnabled()) {
try {
MBeanServerConnection conn =
establishJmxConnection(getHostName(),
getJmxPortNumber(HADOOP_OPTS_ENV));
for (String d : conn.getDomains()) {
if (d != null && d.startsWith(HADOOP_JMX_DOMAIN))
domain = d;
}
if (!jmxObjectNames.containsKey(type))
jmxObjectNames.put(type, getJmxBeanName(domain, serviceName, type));
retAttribute =
conn.getAttribute(jmxObjectNames.get(type), attributeName);
} catch (MBeanException e) {
LOG.debug(e.getStackTrace());
throw new IOException(e);
} catch (AttributeNotFoundException e) {
LOG.warn(e.getStackTrace());
throw new IOException(e);
} catch (InstanceNotFoundException e) {
LOG.warn(e.getStackTrace());
throw new IOException(e);
} catch (ReflectionException e) {
LOG.debug(e.getStackTrace());
throw new IOException(e);
}
}
return retAttribute;
}
/**
* This method has to be implemented by appropriate concrete daemon client
* e.g. DNClient, NNClient, etc.
* Concrete implementation has to provide names of the service and bean type
* @param attributeName name of the attribute to be retrieved
* @return Object value of the given attribute
* @throws IOException is thrown in case of communication errors
*/
public abstract Object getDaemonAttribute (String attributeName)
throws IOException;
}
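
A minimal sketch (hypothetical; the DNClient/NNClient/TTClient classes in the removed tree filled this role) of what a concrete subclass of AbstractDaemonClient has to supply; the class name, service name, bean type and environment variable below are illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.system.AbstractDaemonClient;
import org.apache.hadoop.test.system.DaemonProtocol;
import org.apache.hadoop.test.system.process.RemoteProcess;

public class ExampleDaemonClient extends AbstractDaemonClient<DaemonProtocol> {
  private DaemonProtocol proxy;

  public ExampleDaemonClient(Configuration conf, RemoteProcess process)
      throws IOException {
    super(conf, process);
  }

  @Override
  public void connect() throws IOException {
    // A real client would create the RPC proxy to the daemon here and assign it to 'proxy'.
    setConnected(true);
  }

  @Override
  public void disconnect() throws IOException {
    setConnected(false);
  }

  @Override
  protected DaemonProtocol getProxy() {
    return proxy;
  }

  @Override
  public String getHadoopOptsEnvName() {
    return "HADOOP_EXAMPLE_OPTS";   // illustrative env. var name
  }

  @Override
  public Object getDaemonAttribute(String attributeName) throws IOException {
    // Delegates to the JMX helper defined above; service and bean names are made up.
    return getJmxAttribute("ExampleService", "ExampleInfo", attributeName);
  }
}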

View File

@ -1,537 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.IOException;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileInputStream;
import java.io.DataInputStream;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Iterator;
import java.util.Enumeration;
import java.util.Arrays;
import java.util.Hashtable;
import java.net.URI;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.system.process.ClusterProcessManager;
import org.apache.hadoop.test.system.process.RemoteProcess;
/**
* Abstract class which represents a cluster having multiple daemons.
*/
@SuppressWarnings("unchecked")
public abstract class AbstractDaemonCluster {
private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
private String [] excludeExpList ;
private Configuration conf;
protected ClusterProcessManager clusterManager;
private Map<Enum<?>, List<AbstractDaemonClient>> daemons =
new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
private String newConfDir = null;
private static final String CONF_HADOOP_LOCAL_DIR =
"test.system.hdrc.hadoop.local.confdir";
private final static Object waitLock = new Object();
/**
* Constructor to create a cluster client.<br/>
*
* @param conf
* Configuration to be used while constructing the cluster.
* @param rcluster
* process manager instance to be used for managing the daemons.
*
* @throws IOException
*/
public AbstractDaemonCluster(Configuration conf,
ClusterProcessManager rcluster) throws IOException {
this.conf = conf;
this.clusterManager = rcluster;
createAllClients();
}
/**
* The method returns the cluster manager. The system test cases require an
* instance of HadoopDaemonRemoteCluster to invoke certain operations on the
* daemons.
*
* @return instance of clusterManager
*/
public ClusterProcessManager getClusterManager() {
return clusterManager;
}
protected void createAllClients() throws IOException {
for (RemoteProcess p : clusterManager.getAllProcesses()) {
List<AbstractDaemonClient> dms = daemons.get(p.getRole());
if (dms == null) {
dms = new ArrayList<AbstractDaemonClient>();
daemons.put(p.getRole(), dms);
}
dms.add(createClient(p));
}
}
/**
* Method to create the daemon client.<br/>
*
* @param process
* to manage the daemon.
* @return instance of the daemon client
*
* @throws IOException
*/
protected abstract AbstractDaemonClient<DaemonProtocol>
createClient(RemoteProcess process) throws IOException;
/**
* Get the global cluster configuration which was used to create the
* cluster. <br/>
*
* @return global configuration of the cluster.
*/
public Configuration getConf() {
return conf;
}
/**
* Return the client handle of all the Daemons.<br/>
*
* @return map of role to daemon clients' list.
*/
public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
return daemons;
}
/**
* Checks if the cluster is ready for testing. <br/>
* Algorithm for checking is as follows : <br/>
* <ul>
* <li> Wait for Daemon to come up </li>
* <li> Check if daemon is ready </li>
* <li> If one of the daemons is not ready, return false </li>
* </ul>
*
* @return true if whole cluster is ready.
*
* @throws IOException
*/
public boolean isReady() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
waitForDaemon(daemon);
if (!daemon.isReady()) {
return false;
}
}
}
return true;
}
protected void waitForDaemon(AbstractDaemonClient d) {
final int TEN_SEC = 10000;
while(true) {
try {
LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
LOG.info("Daemon might not be " +
"ready or the call to setReady() method hasn't been " +
"injected to " + d.getClass() + " ");
d.connect();
break;
} catch (IOException e) {
try {
Thread.sleep(TEN_SEC);
} catch (InterruptedException ie) {
}
}
}
}
/**
* Starts the cluster daemons.
* @throws IOException
*/
public void start() throws IOException {
clusterManager.start();
}
/**
* Stops the cluster daemons.
* @throws IOException
*/
public void stop() throws IOException {
clusterManager.stop();
}
/**
* Connect to daemon RPC ports.
* @throws IOException
*/
public void connect() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
daemon.connect();
}
}
}
/**
* Disconnect from daemon RPC ports.
* @throws IOException
*/
public void disconnect() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
daemon.disconnect();
}
}
}
/**
* Ping all the daemons of the cluster.
* @throws IOException
*/
public void ping() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
daemon.ping();
}
}
}
/**
* Connect to the cluster and ensure that it is clean to run tests.
* @throws Exception
*/
public void setUp() throws Exception {
while (!isReady()) {
Thread.sleep(1000);
}
connect();
ping();
clearAllControlActions();
ensureClean();
populateExceptionCounts();
}
/**
* This is mainly used for the test cases to set the list of exceptions
* that will be excluded.
* @param excludeExpList list of exceptions to exclude
*/
public void setExcludeExpList(String [] excludeExpList) {
this.excludeExpList = excludeExpList;
}
public void clearAllControlActions() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
daemon.getProxy().clearActions();
}
}
}
/**
* Ensure that the cluster is clean to run tests.
* @throws IOException
*/
public void ensureClean() throws IOException {
}
/**
* Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
* @throws IOException
*/
public void tearDown() throws IOException {
ensureClean();
clearAllControlActions();
assertNoExceptionMessages();
disconnect();
}
/**
* Populate the exception counts in all the daemons so that it can be checked when
* the testcase has finished running.<br/>
* @throws IOException
*/
protected void populateExceptionCounts() throws IOException {
for(List<AbstractDaemonClient> lst : daemons.values()) {
for(AbstractDaemonClient d : lst) {
d.populateExceptionCount(excludeExpList);
}
}
}
/**
* Assert no exception has been thrown during the sequence of the actions.
* <br/>
* @throws IOException
*/
protected void assertNoExceptionMessages() throws IOException {
for(List<AbstractDaemonClient> lst : daemons.values()) {
for(AbstractDaemonClient d : lst) {
d.assertNoExceptionsOccurred(excludeExpList);
}
}
}
/**
* Get the proxy user definitions for the cluster from the configuration.
* @return ProxyUserDefinitions - proxy users data like groups and hosts.
* @throws Exception - if no proxy users found in config.
*/
public ProxyUserDefinitions getHadoopProxyUsers() throws
Exception {
Iterator itr = conf.iterator();
ArrayList<String> proxyUsers = new ArrayList<String>();
while (itr.hasNext()) {
String entry = itr.next().toString();
if (entry.indexOf("hadoop.proxyuser") >= 0 &&
entry.indexOf("groups=") >= 0) {
proxyUsers.add(entry.split("\\.")[2]);
}
}
if (proxyUsers.size() == 0) {
LOG.error("No proxy users found in the configuration.");
throw new Exception("No proxy users found in the configuration.");
}
ProxyUserDefinitions pud = new ProxyUserDefinitions() {
@Override
public boolean writeToFile(URI filePath) throws IOException {
throw new UnsupportedOperationException("No such method exists.");
};
};
for (String userName : proxyUsers) {
List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." +
userName + ".groups").split("//,"));
List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." +
userName + ".hosts").split("//,"));
ProxyUserDefinitions.GroupsAndHost definitions =
pud.new GroupsAndHost();
definitions.setGroups(groups);
definitions.setHosts(hosts);
pud.addProxyUser(userName, definitions);
}
return pud;
}
/**
* It's a local folder where the config file is stored temporarily
* while the configuration object is serialized.
* @return String temporary local folder path for configuration.
*/
private String getHadoopLocalConfDir() {
String hadoopLocalConfDir = conf.get(CONF_HADOOP_LOCAL_DIR);
if (hadoopLocalConfDir == null || hadoopLocalConfDir.isEmpty()) {
LOG.error("No configuration "
+ "for the CONF_HADOOP_LOCAL_DIR passed");
throw new IllegalArgumentException(
"No Configuration passed for hadoop conf local directory");
}
return hadoopLocalConfDir;
}
/**
* Used to restart the cluster with a new configuration at runtime.<br/>
* @param props attributes for new configuration.
* @param configFile configuration file.
* @throws IOException if an I/O error occurs.
*/
public void restartClusterWithNewConfig(Hashtable<String,?> props,
String configFile) throws IOException {
String mapredConf = null;
String localDirPath = null;
File localFolderObj = null;
File xmlFileObj = null;
String confXMLFile = null;
Configuration initConf = new Configuration(getConf());
Enumeration<String> e = props.keys();
while (e.hasMoreElements()) {
String propKey = e.nextElement();
Object propValue = props.get(propKey);
initConf.set(propKey,propValue.toString());
}
localDirPath = getHadoopLocalConfDir();
localFolderObj = new File(localDirPath);
if (!localFolderObj.exists()) {
localFolderObj.mkdir();
}
confXMLFile = localDirPath + File.separator + configFile;
xmlFileObj = new File(confXMLFile);
initConf.writeXml(new FileOutputStream(xmlFileObj));
newConfDir = clusterManager.pushConfig(localDirPath);
stop();
waitForClusterToStop();
clusterManager.start(newConfDir);
waitForClusterToStart();
localFolderObj.delete();
}
/**
* Used to restart the cluster with the default configuration.<br/>
* @throws IOException if an I/O error occurs.
*/
public void restart() throws
IOException {
stop();
waitForClusterToStop();
start();
waitForClusterToStart();
cleanupNewConf(newConfDir);
}
/**
* Used to delete the new configuration folder.
* @param path - configuration directory path.
* @throws IOException if an I/O error occurs.
*/
public void cleanupNewConf(String path) throws IOException {
File file = new File(path);
file.delete();
}
/**
* Used to wait until the cluster is stopped.<br/>
* @throws IOException if an I/O error occurs.
*/
public void waitForClusterToStop() throws
IOException {
List<Thread> chkDaemonStop = new ArrayList<Thread>();
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
DaemonStopThread dmStop = new DaemonStopThread(daemon);
chkDaemonStop.add(dmStop);
dmStop.start();
}
}
for (Thread daemonThread : chkDaemonStop){
try {
daemonThread.join();
} catch(InterruptedException intExp) {
LOG.warn("Interrupted while thread is joining." + intExp.getMessage());
}
}
}
/**
* Used to wait until the cluster is started.<br/>
* @throws IOException if an I/O error occurs.
*/
public void waitForClusterToStart() throws
IOException {
List<Thread> chkDaemonStart = new ArrayList<Thread>();
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
DaemonStartThread dmStart = new DaemonStartThread(daemon);
chkDaemonStart.add(dmStart);
dmStart.start();
}
}
for (Thread daemonThread : chkDaemonStart){
try {
daemonThread.join();
} catch(InterruptedException intExp) {
LOG.warn("Interrupted while thread is joining" + intExp.getMessage());
}
}
}
/**
* It waits for the specified amount of time; an InterruptedException raised
* while waiting is caught and logged.
* @param duration time in milliseconds.
*/
public void waitFor(long duration) {
try {
synchronized (waitLock) {
waitLock.wait(duration);
}
} catch (InterruptedException intExp) {
LOG.warn("Interrrupeted while thread is waiting" + intExp.getMessage());
}
}
class DaemonStartThread extends Thread {
private AbstractDaemonClient daemon;
public DaemonStartThread(AbstractDaemonClient daemon) {
this.daemon = daemon;
}
public void run(){
LOG.info("Waiting for Daemon " + daemon.getHostName()
+ " to come up.....");
while (true) {
try {
daemon.ping();
LOG.info("Daemon is : " + daemon.getHostName() + " pinging...");
break;
} catch (Exception exp) {
if(LOG.isDebugEnabled()) {
LOG.debug(daemon.getHostName() + " is waiting to come up.");
}
waitFor(60000);
}
}
}
}
class DaemonStopThread extends Thread {
private AbstractDaemonClient daemon;
public DaemonStopThread(AbstractDaemonClient daemon) {
this.daemon = daemon;
}
public void run() {
LOG.info("Waiting for Daemon " + daemon.getHostName()
+ " to stop.....");
while (true) {
try {
daemon.ping();
if(LOG.isDebugEnabled()) {
LOG.debug(daemon.getHostName() +" is waiting state to stop.");
}
waitFor(60000);
} catch (Exception exp) {
LOG.info("Daemon is : " + daemon.getHostName() + " stopped...");
break;
}
}
}
}
}
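For orientation, the sketch below shows roughly how a system test would drive the two restart paths above. It is written as if it were another member of the enclosing cluster class (whose name and imports are outside this hunk), so it can call the methods directly; the property key, value, and target file name are illustrative only.

  // Hypothetical helper, assumed to live inside the cluster class above so it
  // can call restartClusterWithNewConfig()/restart() directly; java.util.Hashtable
  // is already imported by that class for the method signature shown earlier.
  public void runScenarioWithOverride() throws IOException {
    Hashtable<String, String> overrides = new Hashtable<String, String>();
    // Illustrative key/value only; any daemon property could be overridden.
    overrides.put("dfs.namenode.handler.count", "20");
    try {
      // Push the overrides as hdfs-site.xml and restart every daemon on them.
      restartClusterWithNewConfig(overrides, "hdfs-site.xml");
      // ... run the part of the test that needs the overridden setting ...
    } finally {
      // Restart on the original HADOOP_CONF_DIR and remove the pushed dir.
      restart();
    }
  }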

View File

@ -1,86 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
/**
* Class to represent a control action which can be performed on Daemon.<br/>
*
*/
public abstract class ControlAction<T extends Writable> implements Writable {
private T target;
/**
* Default constructor of the Control Action; leaves the target unset. <br/>
*/
public ControlAction() {
}
/**
* Constructor which sets the target of the control action. <br/>
*
* @param target
* of the control action.
*/
public ControlAction(T target) {
this.target = target;
}
/**
* Gets the target of the control action <br/>
*
* @return target of action
*/
public T getTarget() {
return target;
}
@Override
public void readFields(DataInput in) throws IOException {
target.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
target.write(out);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ControlAction) {
ControlAction<T> other = (ControlAction<T>) obj;
return (this.target.equals(other.getTarget()));
} else {
return false;
}
}
@Override
public String toString() {
return "Action Target : " + this.target;
}
}
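ControlAction carries a Writable target over RPC to the daemon; a concrete action is just a subclass that fixes the target type. A minimal hypothetical sketch (the action name and its semantics are invented for illustration; the real actions live in the HDFS and MapReduce Herriot modules):

package org.apache.hadoop.test.system;

import org.apache.hadoop.io.Text;

/** Hypothetical action asking a daemon to misbehave for the named component. */
public class FailComponentAction extends ControlAction<Text> {

  /**
   * The Writable contract needs a no-arg constructor; create the target here
   * so that readFields() has something to deserialize into.
   */
  public FailComponentAction() {
    super(new Text());
  }

  public FailComponentAction(String componentName) {
    super(new Text(componentName));
  }
}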

View File

@ -1,204 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.fs.permission.FsPermission;
/**
* RPC interface of a given Daemon.
*/
public interface DaemonProtocol extends VersionedProtocol{
long versionID = 1L;
/**
* Returns the Daemon configuration.
* @return Configuration
* @throws IOException in case of errors
*/
Configuration getDaemonConf() throws IOException;
/**
* Check if the Daemon is alive.
*
* @throws IOException
* if Daemon is unreachable.
*/
void ping() throws IOException;
/**
* Check if the Daemon is ready to accept RPC connections.
*
* @return true if Daemon is ready to accept RPC connection.
* @throws IOException in case of errors
*/
boolean isReady() throws IOException;
/**
* Get system level view of the Daemon process.
*
* @return returns system level view of the Daemon process.
*
* @throws IOException in case of errors
*/
ProcessInfo getProcessInfo() throws IOException;
/**
* Return a file status object that represents the path.
* @param path
* given path
* @param local
* whether the path is local or not
* @return a FileStatus object
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
FileStatus getFileStatus(String path, boolean local) throws IOException;
/**
* Create a file with given permissions in a file system.
* @param path - source path where the file has to be created.
* @param fileName - file name.
* @param permission - file permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
void createFile(String path, String fileName,
FsPermission permission, boolean local) throws IOException;
/**
* Create a folder with given permissions in a file system.
* @param path - source path where the folder has to be created.
* @param folderName - folder name.
* @param permission - folder permissions.
* @param local - whether the path is local or not.
* @throws IOException - if an I/O error occurs.
*/
public void createFolder(String path, String folderName,
FsPermission permission, boolean local) throws IOException;
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @return the statuses of the files/directories in the given path
* @throws IOException in case of errors
*/
FileStatus[] listStatus(String path, boolean local) throws IOException;
/**
* Enables a particular control action to be performed on the Daemon <br/>
*
* @param action is a control action to be enabled.
*
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
void sendAction(ControlAction action) throws IOException;
/**
* Checks if the particular control action has been delivered to the Daemon
* component <br/>
*
* @param action to be checked.
*
* @return true if action is still in waiting queue of
* actions to be delivered.
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
boolean isActionPending(ControlAction action) throws IOException;
/**
* Removes a particular control action from the list of the actions which the
* daemon maintains. <br/>
* <i><b>Not to be directly called by Test Case or clients.</b></i>
* @param action to be removed
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
void removeAction(ControlAction action) throws IOException;
/**
* Clears out the list of control actions on the particular daemon.
* <br/>
* @throws IOException in case of errors
*/
void clearActions() throws IOException;
/**
* Gets a list of pending actions which are targeted on the specified key.
* <br/>
* <i><b>Not to be directly used by clients</b></i>
* @param key target
* @return list of actions.
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
ControlAction[] getActions(Writable key) throws IOException;
/**
* Gets the number of times a particular pattern has been found in the
* daemon's log file.<br/>
* <b><i>Please note that the search spans all previous messages of the
* Daemon, so a better practice is to get the previous count before an
* operation and then re-check whether the sequence of actions has caused
* any problems</i></b>
* @param pattern pattern to look for in the daemon's log file
* @param list exceptions to ignore
* @return number of times the pattern is found in the log file.
* @throws IOException in case of errors
*/
int getNumberOfMatchesInLogFile(String pattern, String[] list)
throws IOException;
/**
* Gets the user who started the particular daemon initially. <br/>
*
* @return user who started the particular daemon.
* @throws IOException in case of errors
*/
String getDaemonUser() throws IOException;
/**
* Suspends the process.
* @param pid process id.
* @return true if the process is suspended otherwise false.
* @throws IOException if an I/O error occurs.
*/
boolean suspendProcess(String pid) throws IOException;
/**
* Resumes the suspended process.
* @param pid process id
* @return true if suspended process is resumed otherwise false.
* @throws IOException if an I/O error occurs.
*/
boolean resumeProcess(String pid) throws IOException;
}
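A rough sketch of how the test side is expected to drive this protocol. Obtaining the RPC proxy happens elsewhere (for example through the DaemonClient wrappers), so the proxy is taken as a parameter here; the class, method name, polling interval, and log pattern are assumptions for illustration only.

package org.apache.hadoop.test.system;

import java.io.IOException;

/** Hypothetical client-side usage of DaemonProtocol (illustration only). */
public class DaemonProtocolUsageSketch {
  public static void waitAndInspect(DaemonProtocol proxy) throws IOException {
    proxy.ping();                              // throws if the daemon is unreachable
    while (!proxy.isReady()) {                 // poll until RPC initialization is done
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return;
      }
    }
    ProcessInfo info = proxy.getProcessInfo(); // system-level view of the daemon
    int fatals = proxy.getNumberOfMatchesInLogFile("FATAL", new String[0]);
    System.out.println("Daemon user: " + proxy.getDaemonUser()
        + ", active threads: " + info.activeThreadCount()
        + ", FATAL log lines so far: " + fatals);
  }
}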

View File

@ -1,77 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.util.Map;
import org.apache.hadoop.io.Writable;
/**
* Daemon system level process information.
*/
public interface ProcessInfo extends Writable {
/**
* Get the current time in the millisecond.<br/>
*
* @return current time on daemon clock in millisecond.
*/
public long currentTimeMillis();
/**
* Get the environment that was used to start the Daemon process.<br/>
*
* @return the environment variable list.
*/
public Map<String,String> getEnv();
/**
* Get the System properties of the Daemon process.<br/>
*
* @return the properties list.
*/
public Map<String,String> getSystemProperties();
/**
* Get the number of active threads in Daemon VM.<br/>
*
* @return number of active threads in Daemon VM.
*/
public int activeThreadCount();
/**
* Get the maximum heap size that is configured for the Daemon VM. <br/>
*
* @return maximum heap size.
*/
public long maxMemory();
/**
* Get the free memory in Daemon VM.<br/>
*
* @return free memory.
*/
public long freeMemory();
/**
* Get the total used memory in the Daemon VM. <br/>
*
* @return total used memory.
*/
public long totalMemory();
}

View File

@ -1,159 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class ProcessInfoImpl implements ProcessInfo {
private int threadCount;
private long currentTime;
private long freemem;
private long maxmem;
private long totmem;
private Map<String, String> env;
private Map<String, String> props;
public ProcessInfoImpl() {
env = new HashMap<String, String>();
props = new HashMap<String, String>();
}
/**
* Construct a concrete process information object. <br/>
*
* @param threadCount
* count of threads.
* @param currentTime current time on the daemon clock in milliseconds.
* @param freemem free memory in the daemon VM.
* @param maxmem maximum heap size configured for the daemon VM.
* @param totmem total memory in the daemon VM.
* @param env environment list.
* @param props system properties of the daemon process.
*/
public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
long maxmem, long totmem, Map<String, String> env,
Map<String, String> props) {
this.threadCount = threadCount;
this.currentTime = currentTime;
this.freemem = freemem;
this.maxmem = maxmem;
this.totmem = totmem;
this.env = env;
this.props = props;
}
@Override
public int activeThreadCount() {
return threadCount;
}
@Override
public long currentTimeMillis() {
return currentTime;
}
@Override
public long freeMemory() {
return freemem;
}
@Override
public Map<String, String> getEnv() {
return env;
}
@Override
public Map<String,String> getSystemProperties() {
return props;
}
@Override
public long maxMemory() {
return maxmem;
}
@Override
public long totalMemory() {
return totmem;
}
@Override
public void readFields(DataInput in) throws IOException {
this.threadCount = in.readInt();
this.currentTime = in.readLong();
this.freemem = in.readLong();
this.maxmem = in.readLong();
this.totmem = in.readLong();
read(in, env);
read(in, props);
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(threadCount);
out.writeLong(currentTime);
out.writeLong(freemem);
out.writeLong(maxmem);
out.writeLong(totmem);
write(out, env);
write(out, props);
}
private void read(DataInput in, Map<String, String> map) throws IOException {
int size = in.readInt();
for (int i = 0; i < size; i = i + 2) {
String key = in.readUTF();
String value = in.readUTF();
map.put(key, value);
}
}
private void write(DataOutput out, Map<String, String> map)
throws IOException {
int size = (map.size() * 2);
out.writeInt(size);
for (Map.Entry<String, String> entry : map.entrySet()) {
out.writeUTF(entry.getKey());
out.writeUTF(entry.getValue());
}
}
@Override
public String toString() {
StringBuffer strBuf = new StringBuffer();
strBuf.append(String.format("active threads : %d\n", threadCount));
strBuf.append(String.format("current time : %d\n", currentTime));
strBuf.append(String.format("free memory : %d\n", freemem));
strBuf.append(String.format("total memory : %d\n", totmem));
strBuf.append(String.format("max memory : %d\n", maxmem));
strBuf.append("Environment Variables : \n");
for (Map.Entry<String, String> entry : env.entrySet()) {
strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
entry.getValue()));
}
return strBuf.toString();
}
}
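On the daemon side the woven aspect code (not part of this hunk) fills one of these objects from the local JVM. The sketch below shows what such a snapshot could look like using only standard JDK calls; the factory class itself is hypothetical.

package org.apache.hadoop.test.system;

import java.util.HashMap;
import java.util.Map;

/** Hypothetical helper showing how a daemon could snapshot itself. */
public class ProcessInfoSnapshotSketch {
  public static ProcessInfo snapshot() {
    Runtime rt = Runtime.getRuntime();
    // Copy environment and system properties into plain maps.
    Map<String, String> env = new HashMap<String, String>(System.getenv());
    Map<String, String> props = new HashMap<String, String>();
    for (Map.Entry<Object, Object> e : System.getProperties().entrySet()) {
      props.put(e.getKey().toString(), e.getValue().toString());
    }
    return new ProcessInfoImpl(Thread.activeCount(), System.currentTimeMillis(),
        rt.freeMemory(), rt.maxMemory(), rt.totalMemory(), env, props);
  }
}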

View File

@ -1,90 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.io.IOException;
import java.net.URI;
/**
* It is the data container which holds the host names and
* groups for each proxy user.
*/
public abstract class ProxyUserDefinitions {
/**
* Groups and host names container
*/
public class GroupsAndHost {
private List<String> groups;
private List<String> hosts;
public List<String> getGroups() {
return groups;
}
public void setGroups(List<String> groups) {
this.groups = groups;
}
public List<String> getHosts() {
return hosts;
}
public void setHosts(List<String> hosts) {
this.hosts = hosts;
}
}
protected Map<String, GroupsAndHost> proxyUsers;
protected ProxyUserDefinitions () {
proxyUsers = new HashMap<String, GroupsAndHost>();
}
/**
* Add proxy user data to a container.
* @param userName - proxy user name.
* @param definitions - groups and host names.
*/
public void addProxyUser (String userName, GroupsAndHost definitions) {
proxyUsers.put(userName, definitions);
}
/**
* Get the host names and groups for the given proxy user.
* @param userName - proxy user name.
* @return - GroupsAndHost object.
*/
public GroupsAndHost getProxyUser (String userName) {
return proxyUsers.get(userName);
}
/**
* Get the Proxy users data which contains the host names
* and groups for each user.
* @return - the proxy users data as hash map.
*/
public Map<String, GroupsAndHost> getProxyUsers () {
return proxyUsers;
}
/**
* The implementation of this method has to be provided by a child of the class.
* @param filePath - destination file to write the proxy user definitions to.
* @return true if the definitions were written successfully.
* @throws IOException if an I/O error occurs.
*/
public abstract boolean writeToFile(URI filePath) throws IOException;
}
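The abstract writeToFile hook is where a concrete test module serializes the proxy-user map. The sketch below is hypothetical, and the on-disk format (one user per line with its groups and hosts) is invented for illustration; it is not the format expected by any real consumer.

package org.apache.hadoop.test.system;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.net.URI;
import java.util.List;
import java.util.Map;

/** Hypothetical concrete implementation with an invented on-disk format. */
public class SimpleProxyUserDefinitions extends ProxyUserDefinitions {
  @Override
  public boolean writeToFile(URI filePath) throws IOException {
    Writer out = new FileWriter(new File(filePath));
    try {
      for (Map.Entry<String, GroupsAndHost> e : getProxyUsers().entrySet()) {
        GroupsAndHost gh = e.getValue();
        // user=group1,group2|host1,host2
        out.write(e.getKey() + "=" + join(gh.getGroups()) + "|"
            + join(gh.getHosts()) + "\n");
      }
    } finally {
      out.close();
    }
    return true;
  }

  private static String join(List<String> items) {
    StringBuilder sb = new StringBuilder();
    for (String item : items) {
      if (sb.length() > 0) {
        sb.append(',');
      }
      sb.append(item);
    }
    return sb.toString();
  }
}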

View File

@ -1,99 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
/**
* Interface to manage the remote processes in the cluster.
*/
public interface ClusterProcessManager {
/**
* Initialization method to pass the configuration object which is required
* by the ClusterProcessManager to manage the cluster.<br/>
* Configuration object should typically contain all the parameters which are
* required by the implementations.<br/>
*
* @param conf configuration containing values of the specific keys which
* are required by the implementation of the cluster process manager.
*
* @throws IOException when initialization fails.
*/
void init(Configuration conf) throws IOException;
/**
* Get the list of RemoteProcess handles of all the remote processes.
*/
List<RemoteProcess> getAllProcesses();
/**
* Get all the roles this cluster's daemon processes have.
*/
Set<Enum<?>> getRoles();
/**
* Method to start all the remote daemons.<br/>
*
* @throws IOException if startup procedure fails.
*/
void start() throws IOException;
/**
* Starts the daemon from the user specified conf dir.
* @param newConfLocation the dir where the new conf files reside.
* @throws IOException if start from new conf fails.
*/
void start(String newConfLocation) throws IOException;
/**
* Stops the daemon running from user specified conf dir.
*
* @param newConfLocation the dir where the new conf files reside.
* @throws IOException if stop from new conf fails.
*/
void stop(String newConfLocation) throws IOException;
/**
* Method to shutdown all the remote daemons.<br/>
*
* @throws IOException if shutdown procedure fails.
*/
void stop() throws IOException;
/**
* Gets if multi-user support is enabled for this cluster.
* <br/>
* @return true if multi-user support is enabled.
* @throws IOException if RPC returns error.
*/
boolean isMultiUserSupported() throws IOException;
/**
* The pushConfig is used to push a new config to the daemons.
* @param localDir local directory containing the new config files.
* @return the remote directory location where the config will be pushed.
* @throws IOException if pushConfig fails.
*/
String pushConfig(String localDir) throws IOException;
}
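Taken together, the interface methods describe a simple lifecycle: initialize from a Configuration, optionally push a new conf dir to every node, and bounce the daemons against it. A hypothetical sketch (the helper class and method are not part of the framework; any concrete implementation can be passed in):

package org.apache.hadoop.test.system.process;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

/** Hypothetical lifecycle sketch; 'manager' is any concrete implementation. */
public class ClusterLifecycleSketch {
  public static void restartOnNewConf(ClusterProcessManager manager,
      Configuration conf, String localConfDir) throws IOException {
    manager.init(conf);                              // read implementation-specific keys
    String remoteConfDir = manager.pushConfig(localConfDir);
    manager.stop();                                  // stop daemons on the current conf
    manager.start(remoteConfDir);                    // start them on the pushed conf
  }
}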

View File

@ -1,404 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
/**
* The class which implements the start-up and shut-down routines
* based on hadoop-daemon.sh. <br/>
*
* Class requires two keys to be present in the Configuration objects passed to
* it. Look at <code>CONF_HADOOPHOME</code> and
* <code>CONF_HADOOPCONFDIR</code> for the names of the
* configuration keys.
*
* The final command execution will look like the following:
* <br/>
* <code>
* ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName
* --config HADOOP_CONF_DIR (start|stop) command'
* </code>
*/
public abstract class HadoopDaemonRemoteCluster
implements ClusterProcessManager {
private static final Log LOG = LogFactory
.getLog(HadoopDaemonRemoteCluster.class.getName());
public static final String CONF_HADOOPNEWCONFDIR =
"test.system.hdrc.hadoopnewconfdir";
/**
* Key used to configure the HADOOP_PREFIX to be used by the
* HadoopDaemonRemoteCluster.
*/
public final static String CONF_HADOOPHOME =
"test.system.hdrc.hadoophome";
public final static String CONF_SCRIPTDIR =
"test.system.hdrc.deployed.scripts.dir";
/**
* Key used to configure the HADOOP_CONF_DIR to be used by the
* HadoopDaemonRemoteCluster.
*/
public final static String CONF_HADOOPCONFDIR =
"test.system.hdrc.hadoopconfdir";
public final static String CONF_DEPLOYED_HADOOPCONFDIR =
"test.system.hdrc.deployed.hadoopconfdir";
private String hadoopHome;
protected String hadoopConfDir;
protected String scriptsDir;
protected String hadoopNewConfDir;
private final Set<Enum<?>> roles;
private final List<HadoopDaemonInfo> daemonInfos;
private List<RemoteProcess> processes;
protected Configuration conf;
public static class HadoopDaemonInfo {
public final String cmd;
public final Enum<?> role;
public final List<String> hostNames;
public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
super();
this.cmd = cmd;
this.role = role;
this.hostNames = hostNames;
}
public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile)
throws IOException {
super();
this.cmd = cmd;
this.role = role;
File file = new File(getDeployedHadoopConfDir(), hostFile);
BufferedReader reader = null;
hostNames = new ArrayList<String>();
try {
reader = new BufferedReader(new FileReader(file));
String host = null;
while ((host = reader.readLine()) != null) {
if (host.trim().isEmpty() || host.startsWith("#")) {
// Skip empty and possible comment lines
// throw new IllegalArgumentException(
// "Hostname could not be found in file " + hostFile);
continue;
}
hostNames.add(host.trim());
}
if (hostNames.size() < 1) {
throw new IllegalArgumentException("At least one hostname "
+
"is required to be present in file - " + hostFile);
}
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
LOG.warn("Could not close reader", e);
}
}
}
LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from "
+ hostFile);
}
}
@Override
public String pushConfig(String localDir) throws IOException {
for (RemoteProcess process : processes){
process.pushConfig(localDir);
}
return hadoopNewConfDir;
}
public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
this.daemonInfos = daemonInfos;
this.roles = new HashSet<Enum<?>>();
for (HadoopDaemonInfo info : daemonInfos) {
this.roles.add(info.role);
}
}
@Override
public void init(Configuration conf) throws IOException {
this.conf = conf;
populateDirectories(conf);
this.processes = new ArrayList<RemoteProcess>();
populateDaemons();
}
@Override
public List<RemoteProcess> getAllProcesses() {
return processes;
}
@Override
public Set<Enum<?>> getRoles() {
return roles;
}
/**
* Method to populate the hadoop home and hadoop configuration directories.
*
* @param conf
* Configuration object containing values for
* CONF_HADOOPHOME and
* CONF_HADOOPCONFDIR
*
* @throws IllegalArgumentException
* if the configuration or system property set does not contain
* values for the required keys.
*/
protected void populateDirectories(Configuration conf) {
hadoopHome = conf.get(CONF_HADOOPHOME);
hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
scriptsDir = conf.get(CONF_SCRIPTDIR);
hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
|| hadoopConfDir.isEmpty()) {
LOG.error("No configuration "
+ "for the HADOOP_PREFIX and HADOOP_CONF_DIR passed");
throw new IllegalArgumentException(
"No Configuration passed for hadoop home " +
"and hadoop conf directories");
}
}
public static String getDeployedHadoopConfDir() {
String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
if (dir == null || dir.isEmpty()) {
LOG.error("No configuration "
+ "for the CONF_DEPLOYED_HADOOPCONFDIR passed");
throw new IllegalArgumentException(
"No Configuration passed for hadoop deployed conf directory");
}
return dir;
}
@Override
public void start() throws IOException {
for (RemoteProcess process : processes) {
process.start();
}
}
@Override
public void start(String newConfLocation)throws IOException {
for (RemoteProcess process : processes) {
process.start(newConfLocation);
}
}
@Override
public void stop() throws IOException {
for (RemoteProcess process : processes) {
process.kill();
}
}
@Override
public void stop(String newConfLocation) throws IOException {
for (RemoteProcess process : processes) {
process.kill(newConfLocation);
}
}
protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
for (String host : info.hostNames) {
InetAddress addr = InetAddress.getByName(host);
RemoteProcess process = getProcessManager(info,
addr.getCanonicalHostName());
processes.add(process);
}
}
protected void populateDaemons() throws IOException {
for (HadoopDaemonInfo info : daemonInfos) {
populateDaemon(info);
}
}
@Override
public boolean isMultiUserSupported() throws IOException {
return false;
}
protected RemoteProcess getProcessManager(
HadoopDaemonInfo info, String hostName) {
RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
return process;
}
/**
* The core daemon class which actually implements the remote process
* management of actual daemon processes in the cluster.
*
*/
class ScriptDaemon implements RemoteProcess {
private static final String STOP_COMMAND = "stop";
private static final String START_COMMAND = "start";
private static final String SCRIPT_NAME = "hadoop-daemon.sh";
private static final String PUSH_CONFIG ="pushConfig.sh";
protected final String daemonName;
protected final String hostName;
private final Enum<?> role;
public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
this.daemonName = daemonName;
this.hostName = hostName;
this.role = role;
}
@Override
public String getHostName() {
return hostName;
}
private String[] getPushConfigCommand(String localDir, String remoteDir,
File scriptDir) throws IOException{
ArrayList<String> cmdArgs = new ArrayList<String>();
cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
cmdArgs.add(localDir);
cmdArgs.add(hostName);
cmdArgs.add(remoteDir);
cmdArgs.add(hadoopConfDir);
return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
}
private ShellCommandExecutor buildPushConfig(String local, String remote )
throws IOException {
File scriptDir = new File(scriptsDir);
String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
HashMap<String, String> env = new HashMap<String, String>();
ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
scriptDir, env);
LOG.info(executor.toString());
return executor;
}
private ShellCommandExecutor createNewConfDir() throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
cmdArgs.add("ssh");
cmdArgs.add(hostName);
cmdArgs.add("if [ -d "+ hadoopNewConfDir+
" ];\n then echo Will remove existing directory; rm -rf "+
hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+
"echo " + hadoopNewConfDir + " doesnt exist hence creating" +
"; mkdir " + hadoopNewConfDir + ";\n fi");
String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
LOG.info(executor.toString());
return executor;
}
@Override
public void pushConfig(String localDir) throws IOException {
createNewConfDir().execute();
buildPushConfig(localDir, hadoopNewConfDir).execute();
}
private ShellCommandExecutor buildCommandExecutor(String command,
String confDir) {
String[] commandArgs = getCommand(command, confDir);
File cwd = new File(".");
HashMap<String, String> env = new HashMap<String, String>();
env.put("HADOOP_CONF_DIR", confDir);
ShellCommandExecutor executor
= new ShellCommandExecutor(commandArgs, cwd, env);
LOG.info(executor.toString());
return executor;
}
private File getBinDir() {
File binDir = new File(hadoopHome, "bin");
return binDir;
}
protected String[] getCommand(String command, String confDir) {
ArrayList<String> cmdArgs = new ArrayList<String>();
File binDir = getBinDir();
cmdArgs.add("ssh");
cmdArgs.add(hostName);
cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
cmdArgs.add("--config");
cmdArgs.add(confDir);
// XXX Twenty internal version does not support --script option.
cmdArgs.add(command);
cmdArgs.add(daemonName);
return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
}
@Override
public void kill() throws IOException {
kill(hadoopConfDir);
}
@Override
public void start() throws IOException {
start(hadoopConfDir);
}
public void start(String newConfLocation) throws IOException {
ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
newConfLocation);
cme.execute();
String output = cme.getOutput();
if (!output.isEmpty()) { //getOutput() never returns null value
if (output.toLowerCase().contains("error")) {
LOG.warn("Error is detected.");
throw new IOException("Start error\n" + output);
}
}
}
public void kill(String newConfLocation) throws IOException {
ShellCommandExecutor cme
= buildCommandExecutor(STOP_COMMAND, newConfLocation);
cme.execute();
String output = cme.getOutput();
if (!output.isEmpty()) { //getOutput() never returns null value
if (output.toLowerCase().contains("error")) {
LOG.info("Error is detected.");
throw new IOException("Kill error\n" + output);
}
}
}
@Override
public Enum<?> getRole() {
return role;
}
}
}
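A subclass only has to describe which daemon commands run on which hosts; everything else (the ssh invocation of hadoop-daemon.sh, pushConfig, start/stop) comes from the base class. A hypothetical sketch with an invented role enum, daemon names, host name, and host file (the real definitions live in the HDFS and MapReduce Herriot modules):

package org.apache.hadoop.test.system.process;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

/** Hypothetical concrete cluster definition, for illustration only. */
public class ExampleRemoteCluster extends HadoopDaemonRemoteCluster {

  /** Invented roles for the two daemon kinds this example manages. */
  public enum Role { MASTER, WORKER }

  public ExampleRemoteCluster() throws IOException {
    super(describe());
  }

  private static List<HadoopDaemonInfo> describe() throws IOException {
    return Arrays.asList(
        // command passed to hadoop-daemon.sh, role, explicit host list
        new HadoopDaemonInfo("namenode", Role.MASTER,
            Arrays.asList("master.example.com")),
        // host names read from a file under the deployed conf dir
        new HadoopDaemonInfo("datanode", Role.WORKER, "slaves.localcopy.txt"));
  }
}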

View File

@ -1,96 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
public abstract class MultiUserHadoopDaemonRemoteCluster
extends HadoopDaemonRemoteCluster {
public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
super(daemonInfos);
}
@Override
protected RemoteProcess getProcessManager(
HadoopDaemonInfo info, String hostName) {
return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
}
@Override
public boolean isMultiUserSupported() throws IOException {
return true;
}
class MultiUserScriptDaemon extends ScriptDaemon {
private static final String MULTI_USER_BINARY_PATH_KEY =
"test.system.hdrc.multi-user.binary.path";
private static final String MULTI_USER_MANAGING_USER =
"test.system.hdrc.multi-user.managinguser.";
private String binaryPath;
/**
* The managing user for a particular daemon is obtained from
* MULTI_USER_MANAGING_USER + daemon name.
*/
private String managingUser;
public MultiUserScriptDaemon(
String daemonName, String hostName, Enum<?> role) {
super(daemonName, hostName, role);
initialize(daemonName);
}
private void initialize(String daemonName) {
binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
if (binaryPath == null || binaryPath.trim().isEmpty()) {
throw new IllegalArgumentException(
"Binary path for multi-user path is not present. Please set "
+ MULTI_USER_BINARY_PATH_KEY + " correctly");
}
File binaryFile = new File(binaryPath);
if (!binaryFile.exists() || !binaryFile.canExecute()) {
throw new IllegalArgumentException(
"Binary file path is not configured correctly. Please set "
+ MULTI_USER_BINARY_PATH_KEY
+ " to properly configured binary file.");
}
managingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
if (managingUser == null || managingUser.trim().isEmpty()) {
throw new IllegalArgumentException(
"Managing user for daemon not present. Please set "
+ MULTI_USER_MANAGING_USER + daemonName + " to a correct value.");
}
}
@Override
protected String[] getCommand(String command,String confDir) {
ArrayList<String> commandList = new ArrayList<String>();
commandList.add(binaryPath);
commandList.add(managingUser);
commandList.add(hostName);
commandList.add("--config "
+ confDir + " " + command + " " + daemonName);
return (String[]) commandList.toArray(new String[commandList.size()]);
}
}
}

View File

@ -1,74 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.IOException;
/**
* Interface to manage the remote process.
*/
public interface RemoteProcess {
/**
* Get the host on which the daemon process is running/stopped.<br/>
*
* @return hostname on which process is running/stopped.
*/
String getHostName();
/**
* Start a given daemon process.<br/>
*
* @throws IOException if startup fails.
*/
void start() throws IOException;
/**
* Starts a daemon from user specified conf dir.
* @param newConfLocation is dir where new conf resides.
* @throws IOException if start of process fails from new location.
*/
void start(String newConfLocation) throws IOException;
/**
* Stop a given daemon process.<br/>
*
* @throws IOException if shutdown fails.
*/
void kill() throws IOException;
/**
* Stops a given daemon running from the user specified
* conf dir. <br/>
* @param newConfLocation dir location where new conf resides.
* @throws IOException if kill fails from new conf location.
*/
void kill(String newConfLocation) throws IOException;
/**
* Get the role of the Daemon in the cluster.
*
* @return Enum
*/
Enum<?> getRole();
/**
* Pushes the configuration to the new configuration directory.
* @param localDir The local directory which has config files that will be
* pushed to the remote location
* @throws IOException is thrown if the pushConfig results in an error.
*/
void pushConfig(String localDir) throws IOException;
}

View File

@ -1,27 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
public interface RemoteExecution {
public void executeCommand (String remoteHostName, String user,
String command) throws Exception;
public int getExitCode();
public String getOutput();
public String getCommandString();
}

View File

@ -1,203 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import com.jcraft.jsch.*;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Properties;
/**
* Remote Execution of commands on a remote machine.
*/
public class SSHRemoteExecution implements RemoteExecution {
static final Log LOG = LogFactory.getLog(SSHRemoteExecution.class);
static final int SSH_PORT = 22;
static final String DEFAULT_IDENTITY="id_dsa";
static final String DEFAULT_KNOWNHOSTS="known_hosts";
static final String FS = System.getProperty("file.separator");
static final String LS = System.getProperty("line.separator");
private int exitCode;
private StringBuffer output;
private String commandString;
final StringBuffer errorMessage = new StringBuffer();
public SSHRemoteExecution() throws Exception {
}
protected String getHomeDir() {
String currentUser=System.getProperty("user.name");
String userHome=System.getProperty("user.home");
return userHome.substring(0, userHome.indexOf(currentUser)-1);
}
/**
* Execute command at remote host under given user
* @param remoteHostName remote host name
* @param user is the name of the user to be login under;
* current user will be used if this is set to <code>null</code>
* @param command to be executed remotely
* @param identityFile is the name of alternative identity file; default
* is ~user/.ssh/id_dsa
* @param portNumber remote SSH daemon port number, default is 22
* @throws Exception in case of errors
*/
public void executeCommand (String remoteHostName, String user,
String command, String identityFile, int portNumber) throws Exception {
commandString = command;
String sessionUser = System.getProperty("user.name");
String userHome=System.getProperty("user.home");
if (user != null) {
sessionUser = user;
userHome = getHomeDir() + FS + user;
}
String dotSSHDir = userHome + FS + ".ssh";
String sessionIdentity = dotSSHDir + FS + DEFAULT_IDENTITY;
if (identityFile != null) {
sessionIdentity = identityFile;
}
JSch jsch = new JSch();
Session session = jsch.getSession(sessionUser, remoteHostName, portNumber);
jsch.setKnownHosts(dotSSHDir + FS + DEFAULT_KNOWNHOSTS);
jsch.addIdentity(sessionIdentity);
Properties config = new Properties();
config.put("StrictHostKeyChecking", "no");
session.setConfig(config);
session.connect(30000); // making a connection with timeout.
Channel channel=session.openChannel("exec");
((ChannelExec)channel).setCommand(command);
channel.setInputStream(null);
final BufferedReader errReader =
new BufferedReader(
new InputStreamReader(((ChannelExec)channel).getErrStream()));
BufferedReader inReader =
new BufferedReader(new InputStreamReader(channel.getInputStream()));
channel.connect();
Thread errorThread = new Thread() {
@Override
public void run() {
try {
String line = errReader.readLine();
while((line != null) && !isInterrupted()) {
errorMessage.append(line);
errorMessage.append(LS);
line = errReader.readLine();
}
} catch(IOException ioe) {
LOG.warn("Error reading the error stream", ioe);
}
}
};
try {
errorThread.start();
} catch (IllegalStateException e) {
LOG.debug(e);
}
try {
parseExecResult(inReader);
String line = inReader.readLine();
while (line != null) {
line = inReader.readLine();
}
if(channel.isClosed()) {
exitCode = channel.getExitStatus();
LOG.debug("exit-status: " + exitCode);
}
try {
// make sure that the error thread exits
errorThread.join();
} catch (InterruptedException ie) {
LOG.warn("Interrupted while reading the error stream", ie);
}
} catch (Exception ie) {
throw new IOException(ie);
}
finally {
try {
inReader.close();
} catch (IOException ioe) {
LOG.warn("Error while closing the input stream", ioe);
}
try {
errReader.close();
} catch (IOException ioe) {
LOG.warn("Error while closing the error stream", ioe);
}
channel.disconnect();
session.disconnect();
}
}
/**
* Execute command at remote host under the given username.
* The default identity key ~/.ssh/id_dsa will be used.
* The default known_hosts file ~/.ssh/known_hosts will be used.
* @param remoteHostName remote host name
* @param user is the name of the user to be login under;
* if equals to <code>null</code> then current user name will be used
* @param command to be executed remotely
*/
@Override
public void executeCommand (String remoteHostName, String user,
String command) throws Exception {
executeCommand(remoteHostName, user, command, null, SSH_PORT);
}
@Override
public int getExitCode() {
return exitCode;
}
protected void parseExecResult(BufferedReader lines) throws IOException {
output = new StringBuffer();
char[] buf = new char[512];
int nRead;
while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
output.append(buf, 0, nRead);
}
}
/** Get the output of the ssh command.*/
@Override
public String getOutput() {
return (output == null) ? "" : output.toString();
}
/** Get the String representation of ssh command */
@Override
public String getCommandString() {
return commandString;
}
}

View File

@ -1,48 +0,0 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# local folder with new configuration file
LOCAL_DIR=$1
# remote daemon host
HOST=$2
#remote dir points to the location of new config files
REMOTE_DIR=$3
# remote daemon HADOOP_CONF_DIR location
DAEMON_HADOOP_CONF_DIR=$4
if [ $# -ne 4 ]; then
echo "Wrong number of parameters" >&2
exit 2
fi
ret_value=0
echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
echo and populates it with new configs prepared in $LOCAL_DIR
ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
ret_value=$?
# make sure files are writable
ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
# copy new files over
scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
err_code=`echo $? + $ret_value | bc`
echo Copying of files from local to remote returned ${err_code}

View File

@ -1,50 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
public class TestSSHRemoteExecution {
@Test
/**
* Method: executeCommand(String remoteHostName, String user, String command)
*/
public void testExecuteCommandForRemoteHostNameUserCommand() throws Exception {
String command = "ls -l /bin";
SSHRemoteExecution sshRE = new SSHRemoteExecution();
sshRE.executeCommand("localhost", null, "ls -l /bin");
System.out.println(sshRE.getOutput());
assertEquals("Exit code should is expected to be 0", sshRE.getExitCode(), 0);
assertEquals("Mismatched command string", sshRE.getCommandString(), command);
}
@Test
/**
* Method: getHomeDir()
*/
public void testGetHomeDir() throws Exception {
SSHRemoteExecution sshRE = new SSHRemoteExecution();
String ret = sshRE.getHomeDir();
assertEquals(System.getProperty("user.home"),
ret + System.getProperty("file.separator") +
System.getProperty("user.name"));
}
}

View File

@ -1,63 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.util.ArrayList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.test.system.DaemonProtocol;
import org.apache.hadoop.hdfs.test.system.DNProtocol;
import org.apache.hadoop.hdfs.test.system.NNProtocol;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
/**
* This aspect adds two HDFS Herriot specific protocols to the list of 'authorized'
* Herriot protocols.
* Protocol descriptors i.e. 'security.nn.protocol.acl' have to be added to
* <code>hadoop-policy.xml</code> if present
*/
public privileged aspect HDFSPolicyProviderAspect {
private static final Log LOG = LogFactory
.getLog(HDFSPolicyProviderAspect.class);
ArrayList<Service> herriotHDFSServices = null;
pointcut updateHDFSServices() :
execution (public Service[] HDFSPolicyProvider.getServices());
Service[] around() : updateHDFSServices () {
herriotHDFSServices = new ArrayList<Service>();
for (Service s : HDFSPolicyProvider.hdfsServices) {
LOG.debug("Copying configured protocol to "
+ s.getProtocol().getCanonicalName());
herriotHDFSServices.add(s);
}
herriotHDFSServices.add(new Service("security.daemon.protocol.acl",
DaemonProtocol.class));
herriotHDFSServices.add(new Service("security.nn.protocol.acl",
NNProtocol.class));
herriotHDFSServices.add(new Service("security.dn.protocol.acl",
DNProtocol.class));
final Service[] retArray = herriotHDFSServices
.toArray(new Service[herriotHDFSServices.size()]);
LOG.debug("Number of configured protocols to return: " + retArray.length);
return retArray;
}
}

View File

@ -1,70 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import java.io.File;
import java.io.IOException;
import java.util.AbstractList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.test.system.DNProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.system.DaemonProtocol;
import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
public privileged aspect DataNodeAspect {
declare parents : DataNode implements DNProtocol;
public Configuration DataNode.getDaemonConf() {
return super.getConf();
}
pointcut dnConstructorPointcut(Configuration conf, AbstractList<File> dirs,
SecureResources resources) :
call(DataNode.new(Configuration, AbstractList<File>, SecureResources))
&& args(conf, dirs, resources);
after(Configuration conf, AbstractList<File> dirs, SecureResources resources)
returning (DataNode datanode):
dnConstructorPointcut(conf, dirs, resources) {
try {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
datanode.setUser(ugi.getShortUserName());
} catch (IOException e) {
datanode.LOG.warn("Unable to get the user information for the " +
"DataNode");
}
datanode.setReady(true);
}
pointcut getVersionAspect(String protocol, long clientVersion) :
execution(public long DataNode.getProtocolVersion(String ,
long) throws IOException) && args(protocol, clientVersion);
long around(String protocol, long clientVersion) :
getVersionAspect(protocol, clientVersion) {
if(protocol.equals(DaemonProtocol.class.getName())) {
return DaemonProtocol.versionID;
} else if(protocol.equals(DNProtocol.class.getName())) {
return DNProtocol.versionID;
} else {
return proceed(protocol, clientVersion);
}
}
}

View File

@ -1,77 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.test.system.NNProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.system.DaemonProtocol;
public privileged aspect NameNodeAspect {
declare parents : NameNode implements NNProtocol;
// NameNode doesn't store a copy of its configuration
// because it can be changed through the life cycle of the object.
// So, an exposed reference needs to be added and updated after
// new NameNode(Configuration conf) is complete
Configuration NameNode.configRef = null;
// Method simply assigns a reference to the NameNode configuration object
void NameNode.setRef (Configuration conf) {
if (configRef == null)
configRef = conf;
}
public Configuration NameNode.getDaemonConf() {
return configRef;
}
pointcut nnConstructorPointcut(Configuration conf) :
call(NameNode.new(Configuration)) && args(conf);
after(Configuration conf) returning (NameNode namenode):
nnConstructorPointcut(conf) {
try {
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
namenode.setUser(ugi.getShortUserName());
} catch (IOException e) {
namenode.LOG.warn("Unable to get the user information for the " +
"Jobtracker");
}
namenode.setRef(conf);
namenode.setReady(true);
}
pointcut getVersionAspect(String protocol, long clientVersion) :
execution(public long NameNode.getProtocolVersion(String ,
long) throws IOException) && args(protocol, clientVersion);
long around(String protocol, long clientVersion) :
getVersionAspect(protocol, clientVersion) {
if(protocol.equals(DaemonProtocol.class.getName())) {
return DaemonProtocol.versionID;
} else if(protocol.equals(NNProtocol.class.getName())) {
return NNProtocol.versionID;
} else {
return proceed(protocol, clientVersion);
}
}
}

View File

@ -1,147 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Mandatory properties that are to be set and uncommented before running the tests -->
<property>
<name>test.system.hdrc.hadoophome</name>
<value>$(TO_DO_HADOOP_INSTALL)/share/hadoop-current</value>
<description> This is the path to the home directory of the hadoop deployment.
</description>
</property>
<property>
<name>test.system.hdrc.hadoopconfdir</name>
<value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop</value>
<description> This is the path to the configuration directory of the hadoop
cluster that is deployed.
</description>
</property>
<property>
<name>test.system.hdrc.dn.hostfile</name>
<value>slaves.localcopy.txt</value>
<description> File name containing the hostnames where the DataNodes are running.
</description>
</property>
<property>
<name>test.system.hdfs.clusterprocess.impl.class</name>
<value>org.apache.hadoop.hdfs.test.system.HDFSCluster$HDFSProcessManager</value>
<description>
Cluster process manager for the Hdfs subsystem of the cluster. The value
org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager can
be used to enable multi user support.
</description>
</property>
<property>
<name>test.system.hdrc.deployed.scripts.dir</name>
<value>./src/test/system/scripts</value>
<description>
This directory hosts the scripts in the deployed location where
the system test client runs.
</description>
</property>
<property>
<name>test.system.hdrc.hadoopnewconfdir</name>
<value>$(TO_DO_GLOBAL_TMP_DIR)/newconf</value>
<description>
The directory to which the new config files will be copied on all
the clusters.
</description>
</property>
<property>
<name>test.system.hdrc.suspend.cmd</name>
<value>kill -SIGSTOP</value>
<description>
Command for suspending the given process.
</description>
</property>
<property>
<name>test.system.hdrc.resume.cmd</name>
<value>kill -SIGCONT</value>
<description>
Command for resuming the given suspended process.
</description>
</property>
<property>
<name>test.system.hdrc.hadoop.local.confdir</name>
<value>$(TO_DO_GLOBAL_TMP_DIR)/localconf</value>
<description>
A local directory where a new config file is placed before
being pushed into the new config location on the cluster.
</description>
</property>
<!-- Mandatory keys to be set for the multi user support to be enabled. -->
<property>
<name>test.system.hdfs.clusterprocess.impl.class</name>
<value>org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager</value>
<description>
Enables the multi-user based cluster process manager.
</description>
</property>
<property>
<name>test.system.hdrc.multi-user.list.path</name>
<value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers</value>
<description>
Multi user list for creating the proxy users.
</description>
</property>
<property>
<name>test.system.hdrc.multi-user.binary.path</name>
<value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs</value>
<description>
Local file system path on the gateway to the cluster-controller binary, including the binary name.
To build the binary the following commands need to be executed:
% ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
% cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
The location of the binary is an important security precaution.
The binary should be owned by root, and the test user group permission should be set in such a
way that it can be executed by the test users. Example usage would be:
% sudo chown root binary
% sudo chmod 6511 binary
Change permissions appropriately to make it more secure.
</description>
</property>
<property>
<name>test.system.hdrc.multi-user.managinguser.namenode</name>
<value>*</value>
<description>
User value for managing the particular daemon. Please note that these users should be
present on the gateways also. An example configuration for the above would be
key name = test.system.hdrc.multi-user.managinguser.namenode
key value = guest
Please note the daemon names are all lower case, corresponding to the hadoop-daemon.sh command.
</description>
</property>
<property>
<name>test.system.hdrc.multi-user.managinguser.datanode</name>
<value>*</value>
</property>
</configuration>
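For reference, a minimal sketch of how a system-test client might read the keys defined above, assuming this file is placed on the classpath as system-test.xml (which is how HDFSCluster.createCluster below loads it); the class and variable names here are illustrative only:
import org.apache.hadoop.conf.Configuration;
public class SystemTestConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same resource name that HDFSCluster.createCluster adds before reading its keys.
    conf.addResource("system-test.xml");
    // Cluster process manager implementation; createCluster falls back to
    // HDFSCluster.HDFSProcessManager when this key is absent or empty.
    String implKlass = conf.get("test.system.hdfs.clusterprocess.impl.class");
    // Hostfile listing the DataNode hosts, defaulting to "slaves".
    String dnHostFile = conf.get("test.system.hdrc.dn.hostfile", "slaves");
    System.out.println(implKlass + " / " + dnHostFile);
  }
}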

View File

@ -1,99 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.system.process.RemoteProcess;
/**
* Datanode client for system tests. The class assumes that the configuration
* key {@code DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY} is set; only the port
* portion of the address is used.
*/
public class DNClient extends HDFSDaemonClient<DNProtocol> {
DNProtocol proxy;
private static final String HADOOP_DATANODE_OPTS_ENV = "HADOOP_DATANODE_OPTS";
public DNClient(Configuration conf, RemoteProcess process) throws IOException {
super(conf, process);
}
@Override
public void connect() throws IOException {
if (isConnected()) {
return;
}
String sockAddrStr = getConf().get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY);
if (sockAddrStr == null) {
throw new IllegalArgumentException("Datenode IPC address is not set."
+ "Check if " + DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY
+ " is configured.");
}
String[] splits = sockAddrStr.split(":");
if (splits.length != 2) {
throw new IllegalArgumentException(
"Datanode IPC address is not correctly configured");
}
String port = splits[1];
String sockAddr = getHostName() + ":" + port;
InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
proxy = (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID,
bindAddr, getConf());
setConnected(true);
}
@Override
public void disconnect() throws IOException {
RPC.stopProxy(proxy);
setConnected(false);
}
@Override
protected DNProtocol getProxy() {
return proxy;
}
public Configuration getDatanodeConfig() throws IOException {
return getProxy().getDaemonConf();
}
@Override
public String getHadoopOptsEnvName() {
return HADOOP_DATANODE_OPTS_ENV;
}
/**
* Concrete implementation of abstract super class method
* @param attributeName name of the attribute to be retrieved
* @return Object value of the given attribute
* @throws IOException is thrown in case of communication errors
*/
@Override
public Object getDaemonAttribute (String attributeName) throws IOException {
return getJmxAttribute("DataNode", "DataNodeInfo", attributeName);
}
}
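A minimal usage sketch for this client, assuming a Configuration that resolves the datanode IPC address described above and a RemoteProcess handle obtained from HDFSCluster (variable names are illustrative; in practice the cluster framework creates and connects these clients, as TestHL040 later in this change shows):
    DNClient dn = new DNClient(conf, datanodeProcess);
    dn.connect();                                         // builds the RPC proxy from the configured IPC port
    Configuration dnConf = dn.getDatanodeConfig();        // remote daemon's Configuration via the proxy
    Object volInfo = dn.getDaemonAttribute("VolumeInfo"); // JMX attribute, as queried by TestHL040
    dn.disconnect();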

View File

@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.test.system.DaemonProtocol;
/**
* Client side API exposed from Datanode.
* Actual implementations are likely to be injected.
*
* The protocol has to be annotated so KerberosInfo can be filled in during
* creation of an ipc.Client connection.
*/
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
public interface DNProtocol extends DaemonProtocol {
public static final long versionID = 1L;
}

View File

@ -1,149 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.test.system.AbstractDaemonClient;
import org.apache.hadoop.test.system.AbstractDaemonCluster;
import org.apache.hadoop.test.system.process.ClusterProcessManager;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
import org.apache.hadoop.test.system.process.RemoteProcess;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
public class HDFSCluster extends AbstractDaemonCluster {
static {
Configuration.addDefaultResource("hdfs-site.xml");
}
private static final Log LOG = LogFactory.getLog(HDFSCluster.class);
public static final String CLUSTER_PROCESS_MGR_IMPL =
"test.system.hdfs.clusterprocess.impl.class";
private HDFSCluster(Configuration conf, ClusterProcessManager rCluster)
throws IOException {
super(conf, rCluster);
}
/**
* Key used to point to the file containing the hostnames of the DataNodes.
*/
public static final String CONF_HADOOP_DN_HOSTFILE_NAME =
"test.system.hdrc.dn.hostfile";
private static List<HadoopDaemonInfo> hdfsDaemonInfos;
private static String nnHostName;
private static String DN_hostFileName;
protected enum Role {NN, DN}
@Override
protected AbstractDaemonClient
createClient(RemoteProcess process) throws IOException {
Enum<?> pRole = process.getRole();
if (Role.NN.equals(pRole)) {
return createNNClient(process);
} else if (Role.DN.equals(pRole)) {
return createDNClient(process);
} else throw new IOException("Role " + pRole +
" is not supported by HDFSCluster");
}
protected DNClient createDNClient(RemoteProcess dnDaemon) throws IOException {
return new DNClient(getConf(), dnDaemon);
}
protected NNClient createNNClient(RemoteProcess nnDaemon) throws IOException {
return new NNClient(getConf(), nnDaemon);
}
public NNClient getNNClient () {
Iterator<AbstractDaemonClient> iter = getDaemons().get(Role.NN).iterator();
return (NNClient) iter.next();
}
public List<DNClient> getDNClients () {
return (List) getDaemons().get(Role.DN);
}
public DNClient getDNClient (String hostname) {
for (DNClient dnC : getDNClients()) {
if (dnC.getHostName().equals(hostname))
return dnC;
}
return null;
}
public static class HDFSProcessManager extends HadoopDaemonRemoteCluster {
public HDFSProcessManager() {
super(hdfsDaemonInfos);
}
}
public static class MultiUserHDFSProcessManager
extends MultiUserHadoopDaemonRemoteCluster {
public MultiUserHDFSProcessManager() {
super(hdfsDaemonInfos);
}
}
public static HDFSCluster createCluster(Configuration conf) throws Exception {
conf.addResource("system-test.xml");
String sockAddrStr = FileSystem.getDefaultUri(conf).getAuthority();
if (sockAddrStr == null) {
throw new IllegalArgumentException("Namenode IPC address is not set");
}
String[] splits = sockAddrStr.split(":");
if (splits.length != 2) {
throw new IllegalArgumentException(
"Namenode report IPC is not correctly configured");
}
nnHostName = splits[0];
DN_hostFileName = conf.get(CONF_HADOOP_DN_HOSTFILE_NAME, "slaves");
hdfsDaemonInfos = new ArrayList<HadoopDaemonInfo>();
hdfsDaemonInfos.add(new HadoopDaemonInfo("namenode",
Role.NN, Arrays.asList(new String[]{nnHostName})));
hdfsDaemonInfos.add(new HadoopDaemonInfo("datanode",
Role.DN, DN_hostFileName));
String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
if (implKlass == null || implKlass.isEmpty()) {
implKlass = HDFSCluster.HDFSProcessManager.class.getName();
}
Class<ClusterProcessManager> klass =
(Class<ClusterProcessManager>) Class.forName(implKlass);
ClusterProcessManager clusterProcessMgr = klass.newInstance();
LOG.info("Created ClusterProcessManager as " + implKlass);
clusterProcessMgr.init(conf);
return new HDFSCluster(conf, clusterProcessMgr);
}
}

View File

@ -1,46 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.test.system.AbstractDaemonClient;
import org.apache.hadoop.test.system.DaemonProtocol;
import org.apache.hadoop.test.system.process.RemoteProcess;
public abstract class HDFSDaemonClient<PROXY extends DaemonProtocol>
extends AbstractDaemonClient<PROXY> {
public HDFSDaemonClient(Configuration conf, RemoteProcess process)
throws IOException {
super(conf, process);
}
public String[] getHDFSDataDirs() throws IOException {
return getProxy().getDaemonConf().getStrings(
DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
}
public String getHDFSNameDirs() throws IOException {
return getProxy().getDaemonConf().getStrings(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
}
}

View File

@ -1,88 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.system.process.RemoteProcess;
public class NNClient extends HDFSDaemonClient<NNProtocol> {
NNProtocol proxy;
private static final String HADOOP_NAMENODE_OPTS_ENV = "HADOOP_NAMENODE_OPTS";
public NNClient(Configuration conf, RemoteProcess process) throws IOException {
super(conf, process);
}
@Override
public void connect() throws IOException {
if (isConnected())
return;
String sockAddrStr = FileSystem.getDefaultUri(getConf()).getAuthority();
if (sockAddrStr == null) {
throw new IllegalArgumentException("Namenode IPC address is not set");
}
String[] splits = sockAddrStr.split(":");
if (splits.length != 2) {
throw new IllegalArgumentException(
"Namenode report IPC is not correctly configured");
}
String port = splits[1];
String sockAddr = getHostName() + ":" + port;
InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
proxy = (NNProtocol) RPC.getProxy(NNProtocol.class, NNProtocol.versionID,
bindAddr, getConf());
setConnected(true);
}
@Override
public void disconnect() throws IOException {
RPC.stopProxy(proxy);
setConnected(false);
}
@Override
protected NNProtocol getProxy() {
return proxy;
}
@Override
public String getHadoopOptsEnvName() {
return HADOOP_NAMENODE_OPTS_ENV;
}
/**
* Concrete implementation of abstract super class method
* @param attributeName name of the attribute to be retrieved
* @return Object value of the given attribute
* @throws IOException is thrown in case of communication errors
*/
@Override
public Object getDaemonAttribute (String attributeName) throws IOException {
return getJmxAttribute("NameNode", "NameNodeInfo", attributeName);
}
}

View File

@ -1,36 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.test.system;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.test.system.DaemonProtocol;
/**
* Client side API exposed from Namenode.
* Actual implementations are likely to be injected.
*
* The protocol has to be annotated so KerberosInfo can be filled in during
* creation of an ipc.Client connection.
*/
@KerberosInfo(
serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
public interface NNProtocol extends DaemonProtocol {
public static final long versionID = 1L;
}

View File

@ -1,86 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.test.system.DNClient;
import org.apache.hadoop.hdfs.test.system.HDFSCluster;
import org.apache.hadoop.hdfs.test.system.NNClient;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
public class TestHL040 {
private HDFSCluster cluster = null;
private static final Log LOG = LogFactory.getLog(TestHL040.class);
public TestHL040() throws Exception {
}
@Before
public void setupUp() throws Exception {
cluster = HDFSCluster.createCluster(new Configuration());
cluster.setUp();
}
@After
public void tearDown() throws Exception {
cluster.tearDown();
}
@Test
public void testConnect() throws IOException {
LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
LOG.info("================ Getting namenode info ================");
NNClient dfsMaster = cluster.getNNClient();
LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
dfsMaster.getProcessInfo());
LOG.info("================ Getting datanode info ================");
Collection<DNClient> clients = cluster.getDNClients();
for (DNClient dnC : clients) {
LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
dnC.getProcessInfo());
Assert.assertNotNull("Datanode process info isn't suppose to be null",
dnC.getProcessInfo());
LOG.info("Free space " + getFreeSpace(dnC));
}
}
private long getFreeSpace(DNClient dnC) throws IOException {
Object volObj = dnC.getDaemonAttribute("VolumeInfo");
Assert.assertNotNull("Attribute value is expected to be not null", volObj);
LOG.debug("Got object: " + volObj);
Map volInfoMap = (Map) JSON.parse(volObj.toString());
long totalFreeSpace = 0L;
for (Object key : volInfoMap.keySet()) {
Map attrMap = (Map) volInfoMap.get(key);
long freeSpace = (Long) attrMap.get("freeSpace");
totalFreeSpace += freeSpace;
}
return totalFreeSpace;
}
}

View File

@ -1,231 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.apache.hadoop.mapreduce.test.system.JTProtocol;
import org.apache.hadoop.mapreduce.test.system.JTClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobVerification;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory;
import org.apache.hadoop.tools.rumen.ZombieJob;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.mapreduce.JobID;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.util.Iterator;
import java.util.Map;
import java.util.List;
import java.util.Set;
import java.io.IOException;
import org.junit.Assert;
/**
* Run and verify the Gridmix jobs for given a trace.
*/
public class GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(GridmixSystemTestCase.class);
protected static Configuration conf = new Configuration();
protected static MRCluster cluster;
protected static int cSize;
protected static JTClient jtClient;
protected static JTProtocol rtClient;
protected static Path gridmixDir;
protected static Map<String, String> map;
protected static GridmixJobSubmission gridmixJS;
protected static GridmixJobVerification gridmixJV;
protected static List<JobID> jobids;
@BeforeClass
public static void before() throws Exception {
String [] excludeExpList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(excludeExpList);
cluster.setUp();
cSize = cluster.getTTClients().size();
jtClient = cluster.getJTClient();
rtClient = jtClient.getProxy();
gridmixDir = new Path("herriot-gridmix");
UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
map = UtilsForGridmix.getMRTraces(rtClient.getDaemonConf());
}
@AfterClass
public static void after() throws Exception {
UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
org.apache.hadoop.fs.FileUtil.fullyDelete(new java.io.File(System.
getProperty("java.io.tmpdir") + "/gridmix-st/"));
cluster.tearDown();
/* Clean up the proxy user directories if gridmix ran with
RoundRobinUserResolver mode. */
if (gridmixJV != null
&& gridmixJV.getJobUserResolver().contains("RoundRobin")) {
List<String> proxyUsers =
UtilsForGridmix.listProxyUsers(gridmixJS.getJobConf(),
UserGroupInformation.getLoginUser().getShortUserName());
for(int index = 0; index < proxyUsers.size(); index++){
UtilsForGridmix.cleanup(new Path("hdfs:///user/" +
proxyUsers.get(index)),
rtClient.getDaemonConf());
}
}
}
/**
* Run the gridmix with specified runtime parameters and
* verify the jobs after completion of execution.
* @param runtimeValues - common runtime arguments for gridmix.
* @param otherValues - test specific runtime arguments for gridmix.
* @param tracePath - path of a trace file.
* @throws Exception - if an exception occurs.
*/
public static void runGridmixAndVerify(String[] runtimeValues,
String [] otherValues, String tracePath) throws Exception {
runGridmixAndVerify(runtimeValues, otherValues, tracePath ,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Run the gridmix with specified runtime parameters and
* verify the jobs after completion of execution.
* @param runtimeValues - common runtime arguments for gridmix.
* @param otherValues - test specific runtime arguments for gridmix.
* @param tracePath - path of a trace file.
* @param mode - 1 for data generation, 2 for run the gridmix and 3 for
* data generation and run the gridmix.
* @throws Exception - if an exception occurs.
*/
public static void runGridmixAndVerify(String [] runtimeValues,
String [] otherValues, String tracePath, int mode) throws Exception {
List<JobID> jobids = runGridmix(runtimeValues, otherValues, mode);
gridmixJV = new GridmixJobVerification(new Path(tracePath),
gridmixJS.getJobConf(), jtClient);
gridmixJV.verifyGridmixJobsWithJobStories(jobids);
}
/**
* Run the gridmix with user specified mode.
* @param runtimeValues - common runtime parameters for gridmix.
* @param otherValues - test specific runtime parameters for gridmix.
* @param mode - 1 for data generation, 2 for run the gridmix and 3 for
* data generation and run the gridmix.
* @return - list of gridmix job ids.
* @throws Exception - if an exception occurs.
*/
public static List<JobID> runGridmix(String[] runtimeValues,
String[] otherValues, int mode) throws Exception {
gridmixJS = new GridmixJobSubmission(rtClient.getDaemonConf(),
jtClient, gridmixDir);
gridmixJS.submitJobs(runtimeValues, otherValues, mode);
List<JobID> jobids =
UtilsForGridmix.listGridmixJobIDs(jtClient.getClient(),
gridmixJS.getGridmixJobCount());
return jobids;
}
/**
* Get the trace file based on a given regular expression.
* @param regExp - trace file pattern.
* @return - trace file as string.
* @throws IOException - if an I/O error occurs.
*/
public static String getTraceFile(String regExp) throws IOException {
List<String> listTraces = UtilsForGridmix.listMRTraces(
rtClient.getDaemonConf());
Iterator<String> ite = listTraces.iterator();
while(ite.hasNext()) {
String traceFile = ite.next();
if (traceFile.indexOf(regExp)>=0) {
return traceFile;
}
}
return null;
}
/**
* Validate the task memory parameters.
* @param tracePath - trace file.
* @param isTraceHasHighRamJobs - true if the trace has high ram job(s),
* otherwise false
*/
@SuppressWarnings("deprecation")
public static void validateTaskMemoryParamters(String tracePath,
boolean isTraceHasHighRamJobs) throws IOException {
if (isTraceHasHighRamJobs) {
GridmixJobStory gjs = new GridmixJobStory(new Path(tracePath),
rtClient.getDaemonConf());
Set<JobID> jobids = gjs.getZombieJobs().keySet();
boolean isHighRamFlag = false;
for (JobID jobid :jobids) {
ZombieJob zombieJob = gjs.getZombieJobs().get(jobid);
JobConf origJobConf = zombieJob.getJobConf();
int origMapFactor =
GridmixJobVerification.getMapFactor(origJobConf);
int origReduceFactor =
GridmixJobVerification.getReduceFactor(origJobConf);
if (origMapFactor >= 2 || origReduceFactor >= 2) {
isHighRamFlag = true;
long TaskMapMemInMB =
GridmixJobVerification.getScaledTaskMemInMB(
GridMixConfig.JOB_MAP_MEMORY_MB,
GridMixConfig.CLUSTER_MAP_MEMORY,
origJobConf, rtClient.getDaemonConf());
long TaskReduceMemInMB =
GridmixJobVerification.getScaledTaskMemInMB(
GridMixConfig.JOB_REDUCE_MEMORY_MB,
GridMixConfig.CLUSTER_REDUCE_MEMORY,
origJobConf, rtClient.getDaemonConf());
long taskMapLimitInMB =
conf.getLong(GridMixConfig.CLUSTER_MAX_MAP_MEMORY,
JobConf.DISABLED_MEMORY_LIMIT);
long taskReduceLimitInMB =
conf.getLong(GridMixConfig.CLUSTER_MAX_REDUCE_MEMORY,
JobConf.DISABLED_MEMORY_LIMIT);
GridmixJobVerification.verifyMemoryLimits(TaskMapMemInMB,
taskMapLimitInMB);
GridmixJobVerification.verifyMemoryLimits(TaskReduceMemInMB,
taskReduceLimitInMB);
}
}
Assert.assertTrue("Trace doesn't have atleast one high ram job.",
isHighRamFlag);
}
}
public static boolean isLocalDistCache(String fileName, String userName,
boolean visibility) {
return DistributedCacheEmulator.isLocalDistCacheFile(fileName,
userName, visibility);
}
}
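As a hedged illustration of the runtime parameters and mode values documented above (the trace path and input size here are placeholders, not values from this change; the concrete test classes that follow show real configurations):
    String[] runtimeValues = { "LOADJOB",
        SubmitterUserResolver.class.getName(),
        "STRESS",
        "4096m",
        "<path-to-trace>" };
    String[] otherArgs = {
        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false" };
    // Per the javadoc above, DATA_GENERATION_AND_RUN_GRIDMIX corresponds to mode 3:
    // generate the input data and then run gridmix against the trace.
    runGridmixAndVerify(runtimeValues, otherArgs, "<path-to-trace>",
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());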

View File

@ -1,108 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test cpu emulation with a custom interval for gridmix jobs
* against different input data, submission policies and user resolvers.
* Verify the cpu resource metrics of both the map and reduce phases of
* Gridmix jobs with their corresponding original job in the input trace.
*/
public class TestCPUEmulationForMapsAndReducesWithCustomInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationWithUncompressedInput.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with a custom interval setting. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : STRESS, UserResolver: RoundRobinUserResolver.
* Once the {@link Gridmix} run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulationForMapsAndReducesWithCompressedInputCase7()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with a custom interval setting. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : SERIAL, UserResolver: SubmitterUserResolver
* Once the {@link Gridmix} run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase8()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found.", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.4F",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN };
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}

View File

@ -1,105 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.JobContext;
import org.junit.Test;
import org.junit.Assert;
/**
* Test cpu emulation with default interval for gridmix jobs
* against different input data, submission policies and user resolvers.
* Verify the cpu resource metrics for both maps and reduces of
* Gridmix jobs with their corresponding original job in the input trace.
*/
public class TestCPUEmulationForMapsAndReducesWithDefaultInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestCPUEmulationForMapsAndReducesWithDefaultInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with default setting. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : REPLAY, UserResolver: RoundRobinUserResolver.
* Once the {@link Gridmix} run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulationForMapsAndReducesWithCompressedInputCase5()
throws Exception {
final long inputSizeInMB = 7168;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* cpu emulation feature with default settings. The {@link Gridmix}
* should use the following runtime parameters.
* Submission Policy : STRESS, UserResolver: SubmitterUserResolver
* Once the Gridmix run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original jobs in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsAndReducesWithUncompressedInputCase6()
throws Exception {
final long inputSizeInMB = cSize * 400;
String tracePath = getTraceFile("cpu_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN };
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}

View File

@ -1,105 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} cpu emulation with custom interval for
* gridmix jobs against different input data, submission policies and
* user resolvers. Verify the map phase cpu metrics of gridmix jobs
* against their original job in the trace.
*/
public class TestCPUEmulationForMapsWithCustomInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationForMapsWithCustomInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on
* cpu emulation feature with custom setting. The {@link Gridmix} should
* use the following runtime parameters while running gridmix jobs.
* Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
* Once {@link Gridmix} run is complete, verify the map phase cpu resource
* metrics of {@link Gridmix} jobs with their corresponding original jobs
* in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithCompressedInputCase3()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.25F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on
* cpu emulation feature with custom settings. The {@link Gridmix}
* should use the following runtime parameters while running gridmix jobs.
* Submission Policy: REPLAY, User Resolver Mode: RoundRobinUserResolver
* Once {@link Gridmix} run is complete, verify the map phase cpu resource
* metrics of {@link Gridmix} jobs with their corresponding jobs
* in the original trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsUnCompressedInputCase4()
throws Exception {
final long inputSizeInMB = cSize * 200;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
}

View File

@ -1,103 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} cpu emulation with default settings for
* gridmix jobs against different input data, submission policies and
* user resolvers. Verify the map phase cpu metrics of gridmix jobs
* against their original jobs in the trace.
*/
public class TestCPUEmulationForMapsWithDefaultInterval
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestCPUEmulationForMapsWithDefaultInterval.class");
int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
/**
* Generate compressed input and run {@link Gridmix} by turning on cpu
* emulation feature with default settings. The {@link Gridmix} should
* use the following runtime parameters while running the gridmix jobs.
* Submission Policy: STRESS, UserResolver: SubmitterUserResolver.
* Once the {@link Gridmix} run is complete, verify map phase cpu metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithCompressedInputCase1()
throws Exception {
final long inputSizeInMB = 1024 * 6;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues = { "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on
* cpu emulation feature with default settings. The {@link Gridmix}
* should use the following runtime parameters while running Gridmix jobs.
* Submission Policy: REPLAY, UserResolver: RoundRobinUserResolver
* Once the Gridmix run is complete, verify cpu resource metrics of
* {@link Gridmix} jobs with their corresponding original job in a trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testCPUEmulatonForMapsWithUnCompressedInputCase2()
throws Exception {
final long inputSizeInMB = cSize * 200;
String tracePath = getTraceFile("cpu_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,96 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the compression emulation for all the jobs in the trace
* irrespective of compressed inputs.
*/
public class TestCompressionEmulationEnableForAllTypesOfJobs
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestCompressionEmulationEnableForAllTypesOfJobs.class");
/**
* Generate compressed input data and verify the compression emulation
* for all the jobs in the trace irrespective of whether the original
* job uses the compressed input or not. Also use the custom compression
* ratios for map input, map output and reduce output.
* @throws Exception - if an error occurs.
*/
@Test
public void testInputCompressionEmualtionEnableForAllJobsWithDefaultRatios()
throws Exception {
final long inputSizeInMB = 1024 * 6;
final String tracePath = getTraceFile("compression_case4_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
"-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.46",
"-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
"-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.36"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Use existing compressed input data and turn off the compression
* emulation. Verify whether the compression emulation is used
* by the jobs or not.
* @throws Exception - if an error occurs.
*/
@Test
public void testInputCompressionEmulationEnableForAllJobsWithCustomRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case4_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,98 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the gridmix jobs' compression ratios of input and
* intermediate input with default/custom ratios. Also verify
* whether the compressed output file format is enabled or not.
*
*/
public class TestCompressionEmulationForCompressInAndUncompressOut
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestCompressionEmulationForCompressInAndUncompressOut.class");
final long inputSizeInMB = 1024 * 6;
/**
* Generate compressed input data and verify the compression ratios
* of map input and map output against the default compression ratios,
* and also verify whether the compressed output file format
* is enabled or not.
* @throws Exception -if an error occurs.
*/
@Test
public void testCompressionEmulationOfCompressedInputWithDefaultRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case2_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Use existing compressed input data and verify the compression ratios
* of input and intermediate input against custom compression ratios
* and also verify whether the compressed output file format is enabled or not.
* @throws Exception -if an error occurs.
*/
@Test
public void testCompressionEmulationOfCompressedInputWithCustomRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case2_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
"-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.58",
"-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.42"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the gridmix jobs' compression ratios of reduce output
* with default and custom ratios.
*/
public class TestCompressionEmulationForUncompressInAndCompressOut
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestCompressionEmulationForUncompressInAndCompressOut.class");
final long inputSizeInMB = 1024 * 6;
/**
* Generate uncompressed input data and verify the compression ratio
* of reduce output against the default output compression ratio.
* @throws Exception -if an error occurs.
*/
@Test
public void testCompressionEmulationOfCompressedOuputWithDefaultRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case3_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Use existing uncompressed input data and verify the compression ratio
* of reduce output against a custom output compression ratio and also verify
* the compressed output file format.
* @throws Exception -if an error occurs.
*/
@Test
public void testCompressionEmulationOfCompressedOutputWithCustomRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case3_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
final String [] runtimeValues = { "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath };
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.38"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,65 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the {@link Gridmix} with a high ram jobs trace while the high ram
* emulation is disabled and verify whether each {@link Gridmix} job
* honors the high ram settings or not. In disabled mode the jobs should
* not honor the high ram settings and should run as normal jobs.
*/
public class TestDisableGridmixEmulationOfHighRam
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestDisableGridmixEmulationOfHighRam.class");
/**
* Generate input data and run {@link Gridmix} with a high ram jobs trace
* as a load job with the SERIAL submission policy in SubmitterUserResolver
* mode. Verify whether each {@link Gridmix} job honors the
* high ram settings or not after completion of execution. In disabled mode
* the jobs should not honor the high ram settings.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHighRamForReducersOfMRJobs()
throws Exception {
final long inputSizeInMB = cSize * 250;
String tracePath = getTraceFile("highram_mr_jobs_case3");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,95 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the emulation of HDFS and Local FS distributed cache files against
* the given input trace file.
*/
public class TestEmulationOfHDFSAndLocalFSDCFiles extends
GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class");
/**
* Generate the input data and distributed cache files for HDFS and
* local FS. Verify the gridmix emulation of HDFS and Local FS
* distributed cache files in RoundRobinUserResolver mode with STRESS
* submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateDataEmulateHDFSAndLocalFSDCFiles()
throws Exception {
final long inputSizeInMB = 1024 * 6;
final String tracePath = getTraceFile("distcache_case8_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Use existing input and distributed cache files for HDFS and
* local FS. Verify the gridmix emulation of HDFS and Local FS
* distributed cache files in SubmitterUserResolver mode with STRESS
* submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHDFSAndLocalFSDCFiles()
throws Exception {
final String tracePath = getTraceFile("distcache_case8_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues ={"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,91 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix emulation of an HDFS distributed cache file that is
* used by different jobs submitted by different users.
*/
public class TestEmulationOfHDFSDCFileUsesMultipleJobs extends
GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestEmulationOfHDFSDCFileUsesMultipleJobs.class");
/**
* Generate the input data and HDFS distributed cache file based
* on given input trace. Verify the Gridmix emulation of HDFS
* distributed cache file in RoundRobinUserResolver mode with
* STRESS submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulationOfHDFSDCFile()
throws Exception {
final long inputSizeInMB = 1024 * 6;
final String tracePath = getTraceFile("distcache_case9_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify the Gridmix emulation of HDFS distributed cache
* file in SubmitterUserResolver mode with STRESS submission policy
* by using the existing input data and HDFS distributed cache file.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixEmulationOfHDFSPublicDCFile()
throws Exception {
final String tracePath = getTraceFile("distcache_case9_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,92 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix emulation of HDFS distributed cache files of
* different visibilities.
*/
public class TestEmulationOfHDFSDCFilesWithDifferentVisibilities
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestEmulationOfHDFSDCFilesWithDifferentVisibilities.class");
/**
* Generate input data and HDFS distributed cache files of different
* visibilities based on given input trace. Verify the Gridmix emulation
* of HDFS distributed cache files of different visibilities in
* RoundRobinUserResolver mode with STRESS submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulateOfHDFSDCFilesWithDiffVisibilities()
throws Exception {
final long INPUT_SIZE = 1024 * 9;
final String tracePath = getTraceFile("distcache_case5_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
INPUT_SIZE+"m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Disable the distributed cache emulation and verify whether the Gridmix
* jobs emulate the distributed cache usage or not.
* @throws Exception
*/
@Test
public void testHDFSDCFilesWithoutEnableDCEmulation()
throws Exception {
final String tracePath = getTraceFile("distcache_case6_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues ={ "LOADJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the {@link Gridmix} with a trace that combines high ram and normal
* jobs and verify whether the high ram {@link Gridmix} jobs honor the high
* ram settings or not. Normal MR jobs should not honor the high ram emulation.
*/
public class TestEmulationOfHighRamAndNormalMRJobs
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestEmulationOfHighRamAndNormalMRJobs.class");
/**
* Generate input data and run the combined normal and high ram
* {@link Gridmix} jobs as load jobs with the SERIAL submission policy
* in SubmitterUserResolver mode. Verify whether each {@link Gridmix}
* job honors the high ram settings or not after completion of execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHighRamForReducersOfMRJobs()
throws Exception {
final long inputSizeInMB = cSize * 250;
String tracePath = getTraceFile("highram_mr_jobs_case4");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeArgs = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"};
validateTaskMemoryParamters(tracePath, true);
runGridmixAndVerify(runtimeArgs, otherArgs, tracePath);
}
}

View File

@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the emulation of local FS distributed cache files.
*
*/
public class TestEmulationOfLocalFSDCFiles extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestEmulationOfLocalFSDCFiles.class");
/**
* Generate the input data and distributed cache files. Verify the
* gridmix emulation of local file system distributed cache files
* in RoundRobinUserResolver mode with REPLAY submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateInputAndEmulateLocalFSDCFile()
throws Exception {
final long inputSizeInMB = 1024 * 6;
final String tracePath = getTraceFile("distcache_case7_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Use existing input and local distributed cache files and verify
* the gridmix emulation of local file system distributed cache
* files in SubmitterUserResolver mode with STRESS
* submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfLocalFSDCFile()
throws Exception {
final String tracePath = getTraceFile("distcache_case7_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,229 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.apache.hadoop.mapreduce.test.system.JTProtocol;
import org.apache.hadoop.mapreduce.test.system.JTClient;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.gridmix.RoundRobinUserResolver;
import org.apache.hadoop.mapred.gridmix.EchoUserResolver;
import org.apache.hadoop.mapred.gridmix.SubmitterUserResolver;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.ContentSummary;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import org.junit.Assert;
import java.io.IOException;
/**
* Verify the Gridmix data generation with various submission policies and
* user resolver modes.
*/
public class TestGridMixDataGeneration {
private static final Log LOG =
LogFactory.getLog(TestGridMixDataGeneration.class);
private static Configuration conf = new Configuration();
private static MRCluster cluster;
private static JTClient jtClient;
private static JTProtocol rtClient;
private static Path gridmixDir;
private static int cSize;
@BeforeClass
public static void before() throws Exception {
String [] excludeExpList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(excludeExpList);
cluster.setUp();
cSize = cluster.getTTClients().size();
jtClient = cluster.getJTClient();
rtClient = jtClient.getProxy();
gridmixDir = new Path("herriot-gridmix");
UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
}
@AfterClass
public static void after() throws Exception {
UtilsForGridmix.cleanup(gridmixDir,conf);
cluster.tearDown();
}
/**
* Generate the data with the STRESS submission policy in SubmitterUserResolver
* mode and verify whether the generated data matches the given
* input size or not.
* @throws IOException
*/
@Test
public void testGenerateDataWithSTRESSSubmission() throws Exception {
conf = rtClient.getDaemonConf();
final long inputSizeInMB = cSize * 128;
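// Positional Gridmix runtime arguments: job type, user resolver class,
// submission policy, input size and trace path ("file:///dev/null" is
// used here since no trace is replayed during data generation).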
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file:///dev/null"};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
int exitCode =
UtilsForGridmix.runGridmixJob(gridmixDir, conf,
GridMixRunMode.DATA_GENERATION.getValue(),
runtimeValues, otherArgs);
Assert.assertEquals("Data generation has failed.", 0 , exitCode);
checkGeneratedDataAndJobStatus(inputSizeInMB);
}
/**
* Generate the data with the REPLAY submission policy in RoundRobinUserResolver
* mode and verify whether the generated data matches the given
* input size or not.
* @throws Exception
*/
@Test
public void testGenerateDataWithREPLAYSubmission() throws Exception {
conf = rtClient.getDaemonConf();
final long inputSizeInMB = cSize * 300;
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB +"m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
"file:///dev/null"};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
int exitCode =
UtilsForGridmix.runGridmixJob(gridmixDir, conf,
GridMixRunMode.DATA_GENERATION.getValue(),
runtimeValues, otherArgs);
Assert.assertEquals("Data generation has failed.", 0 , exitCode);
checkGeneratedDataAndJobStatus(inputSizeInMB);
}
/**
* Generate the data with the SERIAL submission policy in EchoUserResolver
* mode and also set the number of bytes per file in the data. Verify whether
* each file size matches the given per-file size or not and also
* verify the overall size of the generated data.
* @throws Exception
*/
@Test
public void testGenerateDataWithSERIALSubmission() throws Exception {
conf = rtClient.getDaemonConf();
long perNodeSizeInMB = 500; // 500 mb per node data
final long inputSizeInMB = cSize * perNodeSizeInMB;
String [] runtimeValues ={"LOADJOB",
EchoUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
"file:///dev/null"};
long bytesPerFile = 200 * 1024 * 1024; // 200 mb per file of data
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
int exitCode =
UtilsForGridmix.runGridmixJob(gridmixDir, conf,
GridMixRunMode.DATA_GENERATION.getValue(),
runtimeValues, otherArgs);
Assert.assertEquals("Data generation has failed.", 0 , exitCode);
LOG.info("Verify the eache file size in a generate data.");
verifyEachNodeSize(new Path(gridmixDir, "input"), perNodeSizeInMB);
verifyNumOfFilesGeneratedInEachNode(new Path(gridmixDir, "input"),
perNodeSizeInMB, bytesPerFile);
checkGeneratedDataAndJobStatus(inputSizeInMB);
}
private void checkGeneratedDataAndJobStatus(long inputSize)
throws IOException {
LOG.info("Verify the generated data size.");
long dataSizeInMB = getDataSizeInMB(new Path(gridmixDir,"input"));
Assert.assertTrue("Generate data has not matched with given size",
dataSizeInMB + 0.1 > inputSize || dataSizeInMB - 0.1 < inputSize);
JobClient jobClient = jtClient.getClient();
int len = jobClient.getAllJobs().length;
LOG.info("Verify the job status after completion of job.");
Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED,
jobClient.getAllJobs()[len-1].getRunState());
}
private void verifyEachNodeSize(Path inputDir, long dataSizePerNode)
throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
FileStatus [] fstatus = fs.listStatus(inputDir);
for (FileStatus fstat : fstatus) {
if ( fstat.isDirectory()) {
long fileSize = getDataSizeInMB(fstat.getPath());
Assert.assertTrue("The Size has not matched with given "
+ "per node file size(" + dataSizePerNode +"MB)",
fileSize + 0.1 > dataSizePerNode
|| fileSize - 0.1 < dataSizePerNode);
}
}
}
private void verifyNumOfFilesGeneratedInEachNode(Path inputDir,
long nodeSize, long fileSize) throws IOException {
// nodeSize is in MB while fileSize is in bytes; convert before computing
// the expected number of files per node, rounding up for any remainder.
long nodeSizeInBytes = nodeSize * 1024 * 1024;
long expFileCount = nodeSizeInBytes / fileSize
+ ((nodeSizeInBytes % fileSize != 0)? 1 : 0);
FileSystem fs = inputDir.getFileSystem(conf);
FileStatus [] fstatus = fs.listStatus(inputDir);
for (FileStatus fstat : fstatus) {
if ( fstat.isDirectory()) {
FileSystem nodeFs = fstat.getPath().getFileSystem(conf);
long actFileCount = nodeFs.getContentSummary(
fstat.getPath()).getFileCount();
Assert.assertEquals("File count has not matched.", expFileCount,
actFileCount);
}
}
}
private static long getDataSizeInMB(Path inputDir) throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
ContentSummary csmry = fs.getContentSummary(inputDir);
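// getLength() returns the total size in bytes; convert it to MB.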
long dataSize = csmry.getLength();
dataSize = dataSize/(1024 * 1024);
return dataSize;
}
}

View File

@ -1,128 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.test.system.MRCluster;
import org.apache.hadoop.mapreduce.test.system.JTClient;
import org.apache.hadoop.mapreduce.test.system.JTProtocol;
import org.apache.hadoop.mapred.gridmix.FilePool;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.AfterClass;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
public class TestGridMixFilePool {
private static final Log LOG =
LogFactory.getLog(TestGridMixFilePool.class);
private static Configuration conf = new Configuration();
private static MRCluster cluster;
private static JTProtocol remoteClient;
private static JTClient jtClient;
private static Path gridmixDir;
private static int clusterSize;
@BeforeClass
public static void before() throws Exception {
String [] excludeExpList = {"java.net.ConnectException",
"java.io.IOException"};
cluster = MRCluster.createCluster(conf);
cluster.setExcludeExpList(excludeExpList);
cluster.setUp();
jtClient = cluster.getJTClient();
remoteClient = jtClient.getProxy();
clusterSize = cluster.getTTClients().size();
gridmixDir = new Path("herriot-gridmix");
UtilsForGridmix.createDirs(gridmixDir, remoteClient.getDaemonConf());
}
@AfterClass
public static void after() throws Exception {
UtilsForGridmix.cleanup(gridmixDir, conf);
cluster.tearDown();
}
@Test
public void testFilesCountAndSizesForSpecifiedFilePool() throws Exception {
conf = remoteClient.getDaemonConf();
final long inputSizeInMB = clusterSize * 200;
int [] fileSizesInMB = {50, 100, 400, 50, 300, 10, 60, 40, 20 ,10 , 500};
long targetSize = Long.MAX_VALUE;
final int expFileCount = clusterSize + 4;
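// Expected pool entries: the 4 files created below that are at least
// 100 MB (100, 400, 300 and 500 MB) plus one generated input file of
// about 200 MB per cluster node.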
String [] runtimeValues ={"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file:///dev/null"};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
// Generate the input data by using gridmix framework.
int exitCode =
UtilsForGridmix.runGridmixJob(gridmixDir, conf,
GridMixRunMode.DATA_GENERATION.getValue(),
runtimeValues, otherArgs);
Assert.assertEquals("Data generation has failed.", 0 , exitCode);
// Create the files without using gridmix input generation with
// above mentioned sizes in a array.
createFiles(new Path(gridmixDir, "input"), fileSizesInMB);
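// Only files of at least 100 MB should be considered by the file pool.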
conf.setLong(FilePool.GRIDMIX_MIN_FILE, 100 * 1024 * 1024);
FilePool fpool = new FilePool(conf, new Path(gridmixDir, "input"));
fpool.refresh();
verifyFilesSizeAndCountForSpecifiedPool(expFileCount, targetSize, fpool);
}
private void createFiles(Path inputDir, int [] fileSizes)
throws Exception {
for (int size : fileSizes) {
UtilsForGridmix.createFile(size, inputDir, conf);
}
}
private void verifyFilesSizeAndCountForSpecifiedPool(int expFileCount,
long minFileSize, FilePool pool) throws IOException {
final ArrayList<FileStatus> files = new ArrayList<FileStatus>();
long filesSizeInBytes = pool.getInputFiles(minFileSize, files);
long actFilesSizeInMB = filesSizeInBytes / (1024 * 1024);
long expFilesSizeInMB = (clusterSize * 200) + 1300;
Assert.assertEquals("Files Size has not matched for specified pool.",
expFilesSizeInMB, actFilesSizeInMB);
int actFileCount = files.size();
Assert.assertEquals("File count has not matched.", expFileCount,
actFileCount);
int count = 0;
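// Entries that are not manually created "datafile_" files should be the
// generated input files, one per cluster node.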
for (FileStatus fstat : files) {
String fp = fstat.getPath().toString();
count = count + ((fp.indexOf("datafile_") > 0)? 0 : 1);
}
Assert.assertEquals("Total folders are not matched with cluster size",
clusterSize, count);
}
}

View File

@ -1,173 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.gridmix.Gridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix generated input when compression emulation is turned on.
*/
public class TestGridmixCompressedInputGeneration
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixCompressedInputGeneration.class");
/**
* Generate input data and verify whether input files are compressed
* or not.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixCompressionInputGeneration() throws Exception {
final long inputSizeInMB = 1024 * 7;
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file:///dev/null"};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
};
LOG.info("Verify the generated compressed input data.");
runAndVerify(true, inputSizeInMB, runtimeValues, otherArgs);
}
/**
* Disable compression emulation and verify whether input files are
* compressed or not.
* @throws Exception
*/
@Test
public void testGridmixInputGenerationWithoutCompressionEnable()
throws Exception {
UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
final long inputSizeInMB = 1024 * 6;
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file:///dev/null"};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
LOG.info("Verify the generated uncompressed input data.");
runAndVerify(false, inputSizeInMB, runtimeValues, otherArgs);
}
private void runAndVerify(boolean isCompressed, long INPUT_SIZE,
String [] runtimeValues, String [] otherArgs) throws Exception {
int exitCode =
UtilsForGridmix.runGridmixJob(gridmixDir, conf,
GridMixRunMode.DATA_GENERATION.getValue(),
runtimeValues,otherArgs);
Assert.assertEquals("Data generation has failed.", 0, exitCode);
verifyJobStatus();
verifyInputDataSize(INPUT_SIZE);
verifyInputFiles(isCompressed);
}
private void verifyInputFiles(boolean isCompressed) throws IOException {
List<String> inputFiles =
getInputFiles(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
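// Files with a .gz or .tgz extension are treated as compressed input.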
for (String inputFile: inputFiles) {
boolean fileStatus = inputFile.contains(".gz")
|| inputFile.contains(".tgz");
if (isCompressed) {
Assert.assertTrue("Compressed input split file was not found.",
fileStatus);
} else {
Assert.assertFalse("Uncompressed input split file was not found.",
fileStatus);
}
}
}
private void verifyInputDataSize(long INPUT_SIZE) throws IOException {
long actDataSize =
getInputDataSizeInMB(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
double ratio = ((double)actDataSize)/INPUT_SIZE;
long expDataSize = (long)(INPUT_SIZE * ratio);
Assert.assertEquals("Generated data has not matched with given size.",
expDataSize, actDataSize);
}
private void verifyJobStatus() throws IOException {
JobClient jobClient = jtClient.getClient();
int len = jobClient.getAllJobs().length;
LOG.info("Verify the job status after completion of job...");
Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED,
jobClient.getAllJobs()[len -1].getRunState());
}
private long getInputDataSizeInMB(Configuration conf, Path inputDir)
throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
ContentSummary csmry = fs.getContentSummary(inputDir);
long dataSize = csmry.getLength();
dataSize = dataSize/(1024 * 1024);
return dataSize;
}
private List<String> getInputFiles(Configuration conf, Path inputDir)
throws IOException {
FileSystem fs = inputDir.getFileSystem(conf);
FileStatus [] listStatus = fs.listStatus(inputDir);
List<String> files = new ArrayList<String>();
for (FileStatus fileStat : listStatus) {
files.add(getInputFile(fileStat, conf));
}
return files;
}
private String getInputFile(FileStatus fstatus, Configuration conf)
throws IOException {
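// Return the file's name directly, or recurse into a directory and
// return the name of the first file found in it.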
String fileName = null;
if (!fstatus.isDirectory()) {
fileName = fstatus.getPath().getName();
} else {
FileSystem fs = fstatus.getPath().getFileSystem(conf);
FileStatus [] listStatus = fs.listStatus(fstatus.getPath());
for (FileStatus fileStat : listStatus) {
return getInputFile(fileStat, conf);
}
}
return fileName;
}
}

View File

@ -1,102 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the gridmix jobs compression ratios of map input,
* map output and reduce output with default and user specified
* compression ratios.
*
*/
public class TestGridmixCompressionEmulationWithCompressInput
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(
"TestGridmixCompressionEmulationWithCompressInput.class");
final long inputSizeInMB = 1024 * 6;
/**
* Generate compressed input data and verify the map input,
* map output and reduce output compression ratios of gridmix jobs
* against the default compression ratios.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixCompressionRatiosAgainstDefaultCompressionRatio()
throws Exception {
final String tracePath = getTraceFile("compression_case1_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify map input, map output and reduce output compression ratios of
* gridmix jobs against user specified compression ratios.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixOuputCompressionRatiosAgainstCustomRatios()
throws Exception {
final String tracePath = getTraceFile("compression_case1_trace");
Assert.assertNotNull("Trace file has not found.", tracePath);
UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
"-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.68",
"-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
"-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.40"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix emulation of HDFS private distributed cache file.
*/
public class TestGridmixEmulationOfHDFSPrivateDCFile
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixEmulationOfHDFSPrivateDCFile.class");
/**
* Generate input data and a single HDFS private distributed cache
* file based on the given input trace. Verify the Gridmix emulation of a
* single private HDFS distributed cache file in RoundRobinUserResolver
* mode with STRESS submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulateOfHDFSPrivateDCFile()
throws Exception {
final long inputSizeInMB = 8192;
final String tracePath = getTraceFile("distcache_case3_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify the Gridmix emulation of single HDFS private distributed
* cache file in SubmitterUserResolver mode with REPLAY submission
* policy by using the existing input data and HDFS private
* distributed cache file.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixEmulationOfHDFSPrivateDCFile()
throws Exception {
final String tracePath = getTraceFile("distcache_case3_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues ={"LOADJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,91 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix emulation of HDFS public distributed cache file.
*/
public class TestGridmixEmulationOfHDFSPublicDCFile
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixEmulationOfHDFSPublicDCFile.class");
/**
* Generate the input data and HDFS distributed cache file based
* on given input trace. Verify the Gridmix emulation of single HDFS
* public distributed cache file in SubmitterUserResolver mode with
* STRESS submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulationOfSingleHDFSDCFile()
throws Exception {
final long inputSizeInMB = 7168;
final String tracePath = getTraceFile("distcache_case1_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify the Gridmix emulation of a single HDFS public distributed cache
* file in RoundRobinUserResolver mode with REPLAY submission policy
* by using the existing input data and HDFS public distributed cache file.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixEmulationOfSingleHDFSPublicDCFile()
throws Exception {
final String tracePath = getTraceFile("distcache_case1_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the {@link Gridmix} with a high ram jobs trace and
* verify whether each {@link Gridmix} job honors the high ram settings or not.
* In the trace the jobs should use the high ram for both maps and reduces.
*/
public class TestGridmixEmulationOfHighRamJobsCase1
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase1.class");
/**
* Generate input data and run {@link Gridmix} with a high ram jobs trace
* as a load job and STRESS submission policy in a SubmitterUserResolver
* mode. Verify whether each {@link Gridmix} job honors the
* high ram settings or not after completion of execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHighRamForMapsAndReducesOfMRJobs()
throws Exception {
final long inputSizeInMB = cSize * 400;
String tracePath = getTraceFile("highram_mr_jobs_case1");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
validateTaskMemoryParamters(tracePath, true);
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,67 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the {@link Gridmix} with a high ram jobs trace and
* verify whether each {@link Gridmix} job honors the high ram settings or not.
* In the trace the jobs should use the high ram only for maps.
*/
public class TestGridmixEmulationOfHighRamJobsCase2
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase2.class");
/**
* Generate input data and run {@link Gridmix} with a high ram jobs trace
* as a load job with the REPLAY submission policy in RoundRobinUserResolver
* mode. Verify whether each {@link Gridmix} job honors the high ram
* settings or not after completion of execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHighRamForMapsOfMRJobs()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("highram_mr_jobs_case2");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
validateTaskMemoryParamters(tracePath, true);
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.GridmixJob;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the {@link Gridmix} with a high ram jobs trace and
* verify whether each {@link Gridmix} job honors the high ram settings or not.
* In the trace the jobs should use the high ram only for reducers.
*/
public class TestGridmixEmulationOfHighRamJobsCase3
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase3.class);
/**
* Generate input data and run {@link Gridmix} with a high ram jobs trace
* as a load job and SERIAL submission policy in a SubmitterUserResolver
* mode. Verify whether each {@link Gridmix} job honors the
* high ram settings or not after completion of execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testEmulationOfHighRamForReducersOfMRJobs()
throws Exception {
final long inputSizeInMB = cSize * 250;
String tracePath = getTraceFile("highram_mr_jobs_case3");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
validateTaskMemoryParamters(tracePath, true);
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,91 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
/**
* Verify the Gridmix emulation of Multiple HDFS private distributed
* cache files.
*/
public class TestGridmixEmulationOfMultipleHDFSPrivateDCFiles
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.class);
/**
* Generate input data and multiple HDFS private distributed cache
* files based on a given input trace. Verify the Gridmix emulation of
* multiple private HDFS distributed cache files in RoundRobinUserResolver
* mode with SERIAL submission policy.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulationOfMultipleHDFSPrivateDCFiles()
throws Exception {
final long inputSize = 6144;
final String tracePath = getTraceFile("distcache_case4_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"SERIAL",
inputSize + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify the Gridmix emulation of multiple HDFS private distributed
* cache files in SubmitterUserResolver mode with STRESS submission
* policy by using the existing input data and HDFS private
* distributed cache files.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixEmulationOfMultipleHDFSPrivateDCFiles()
throws Exception {
final String tracePath = getTraceFile("distcache_case4_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,92 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
/**
* Verify the Gridmix emulation of Multiple HDFS public distributed
* cache files.
*/
public class TestGridmixEmulationOfMultipleHDFSPublicDCFiles
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixEmulationOfMultipleHDFSPublicDCFiles.class);
/**
* Generate the compressed input data and dist cache files based
* on input trace. Verify the Gridmix emulation of
* multiple HDFS public distributed cache file.
* @throws Exception - if an error occurs.
*/
@Test
public void testGenerateAndEmulationOfMultipleHDFSDCFiles()
throws Exception {
final long inputSizeInMB = 7168;
final String tracePath = getTraceFile("distcache_case2_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
final String [] otherArgs = {
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Verify the Gridmix emulation of multiple HDFS public distributed cache
* files by using existing compressed input data and HDFS dist cache files.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixEmulationOfMultipleHDFSPublicDCFiles()
throws Exception {
final String tracePath = getTraceFile("distcache_case2_trace");
Assert.assertNotNull("Trace file was not found.", tracePath);
final String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
tracePath};
final String [] otherArgs = {
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.RUN_GRIDMIX.getValue());
}
}

View File

@ -1,67 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
/**
* Run the Gridmix with 10 minutes MR jobs trace and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith10minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith10minTrace.class);
/**
* Generate data and run gridmix by sleep jobs with SERIAL submission
* policy in a RoundRobinUserResolver mode against a 10 minutes trace file.
* Verify each Gridmix job history with a corresponding job story
* in a trace file after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith10minTrace() throws Exception {
final long inputSizeInMB = cSize * 250;
final long minFileSize = 200 * 1024 * 1024;
String [] runtimeValues =
{"SLEEPJOB",
RoundRobinUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
map.get("10m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
"-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false",
"-D", GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY + "=true",
"-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10"
};
String tracePath = map.get("10m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
/**
* Run the Gridmix with 12 minutes MR job traces and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith12minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith12minTrace.class);
/**
* Generate data and run gridmix sleep jobs with REPLAY submission
* policy in a SubmitterUserResolver mode against 12 minutes trace file.
* Verify each Gridmix job history with a corresponding job story
* in a trace file after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith12minTrace() throws Exception {
final long inputSizeInMB = cSize * 150;
String [] runtimeValues = {"SLEEPJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
map.get("12m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10",
"-D", GridMixConfig.GRIDMIX_SLEEP_REDUCE_MAX_TIME + "=5"
};
String tracePath = map.get("12m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,59 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.junit.Test;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
/**
* Run the Gridmix with 1 minute MR jobs trace and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith1minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith1minTrace.class);
/**
* Generate data and run gridmix by load job with STRESS submission policy
* in a SubmitterUserResolver mode against 1 minute trace file.
* Verify each Gridmix job history with a corresponding job story in the
* trace after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith1minTrace() throws Exception {
final long inputSizeInMB = cSize * 400;
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
map.get("1m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
String tracePath = map.get("1m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,64 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the Gridmix with 2 minutes job trace which has been generated with
* streaming jobs histories and verify each job history against
* the corresponding job story in a given trace file.
*/
public class TestGridmixWith2minStreamingJobTrace
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixWith2minStreamingJobTrace.class");
/**
* Generate input data and run Gridmix by load job with STRESS submission
* policy in a SubmitterUserResolver mode against 2 minutes job
* trace file of streaming jobs. Verify each Gridmix job history with
* a corresponding job story in a trace file after completion of all
* the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith2minStreamJobTrace() throws Exception {
final long inputSizeInMB = cSize * 250;
final long minFileSize = 150 * 1024 * 1024;
String tracePath = getTraceFile("2m_stream");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
"-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,68 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.junit.Assert;
import org.junit.Test;
/**
* Run the Gridmix with 3 minutes job trace which has been generated with
* streaming jobs histories and verify each job history against
* corresponding job story in a given trace file.
*/
public class TestGridmixWith3minStreamingJobTrace
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixWith3minStreamingJobTrace.class");
/**
* Generate input data and run gridmix by load job with REPLAY submission
* policy in a RoundRobinUserResolver mode against 3 minutes job trace file
* of streaming job. Verify each gridmix job history with a corresponding
* job story in a trace file after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith3minStreamJobTrace() throws Exception {
final long inputSizeInMB = cSize * 200;
final long bytesPerFile = 150 * 1024 * 1024;
String tracePath = getTraceFile("3m_stream");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
"-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
/**
* Run the Gridmix with 3 minutes MR jobs trace and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith3minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith3minTrace.class);
/**
* Generate data and run gridmix by load job with REPLAY submission
* policy in a RoundRobinUserResolver mode by using 3 minutes trace file.
* Verify each Gridmix job history with a corresponding job story in
* a trace after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith3minTrace() throws Exception {
final long inputSizeInMB = cSize * 200;
String [] runtimeValues =
{"LOADJOB",
RoundRobinUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
map.get("3m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
String tracePath = map.get("3m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,65 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Run the Gridmix with 5 minutes job trace which has been generated with
* streaming jobs histories and verify each job history against
* corresponding job story in a given trace file.
*/
public class TestGridmixWith5minStreamingJobTrace
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestGridmixWith5minStreamingJobTrace.class");
/**
* Generate input data and run gridmix by load job with SERIAL submission
* policy in a SubmitterUserResolver mode against 5 minutes job trace file
* of streaming job. Verify each gridmix job history with a corresponding
* job story in a trace file after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith5minStreamJobTrace() throws Exception {
String tracePath = getTraceFile("5m_stream");
Assert.assertNotNull("Trace file has not found.", tracePath);
final long inputSizeInMB = cSize * 200;
final long bytesPerFile = 150 * 1024 * 1024;
String [] runtimeValues = {"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_KEY_FRC + "=0.5f",
"-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
/**
* Run the Gridmix with 5 minutes MR jobs trace and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith5minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith5minTrace.class);
/**
* Generate data and run gridmix by load job with SERIAL submission
* policy in a SubmitterUserResolver mode against 5 minutes trace file.
* Verify each Gridmix job history with a corresponding job story
* in a trace file after completion of all the jobs.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith5minTrace() throws Exception {
final long inputSizeInMB = cSize * 300;
final long minFileSize = 100 * 1024 * 1024;
String [] runtimeValues ={"LOADJOB",
SubmitterUserResolver.class.getName(),
"SERIAL",
inputSizeInMB + "m",
map.get("5m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize
};
String tracePath = map.get("5m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,62 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.junit.Test;
/**
* Run the Gridmix with 7 minutes MR jobs trace and
* verify each job history against the corresponding job story
* in a given trace file.
*/
public class TestGridmixWith7minTrace extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog(TestGridmixWith7minTrace.class);
/**
* Generate data and run gridmix by sleep job with STRESS submission
* policy in a SubmitterUserResolver mode against 7 minute trace file.
* Verify each Gridmix job history with a corresponding job story
* in a trace file after completion of all the jobs execution.
* @throws Exception - if an error occurs.
*/
@Test
public void testGridmixWith7minTrace() throws Exception {
final long inputSizeInMB = cSize * 400;
final long minFileSize = 200 * 1024 * 1024;
String [] runtimeValues ={"SLEEPJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
map.get("7m")};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
"-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false"
};
String tracePath = map.get("7m");
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
}
}

View File

@ -1,106 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} memory emulation feature for the jobs with
* custom progress interval, different input data, submission policies
* and user resolver modes. Verify the total heap usage of map and reduce
* tasks of the jobs with corresponding original job in the trace.
*/
public class TestMemEmulForMapsAndReducesWithCustomIntrvl
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestMemEmulForMapsAndReducesWithCustomIntrvl.class");
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* memory emulation with custom progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
* Verify maps and reduces total heap memory usage of {@link Gridmix} jobs
* with corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForReducesWithCompressedInputCase7()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* memory emulation with custom progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
* Verify maps and reduces total heap memory usage of {@link Gridmix} jobs
* with corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForReducesWithUncompressedInputCase8()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,106 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} memory emulation feature for gridmix jobs
* with default progress interval, different input data, submission
* policies and user resolver modes. Verify the total heap usage of
* map and reduce tasks of the jobs with corresponding original
* job in the trace.
*/
public class TestMemEmulForMapsAndReducesWithDefaultIntrvl
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestMemEmulForMapsAndReducesWithDefaultIntrvl.class");
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* memory emulation with default progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
* Verify maps and reduces total heap memory usage of {@link Gridmix} jobs
* with corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForReducesWithCompressedInputCase5()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* memory emulation with default progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
* Verify maps and reduces total heap memory usage of {@link Gridmix} jobs
* with corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForReducesWithUncompressedInputCase6()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"REPLAY",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,108 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs
* with default progress interval, custom heap memory ratio, different input
* data, submission policies and user resolver modes. Verify the total heap
* usage of map and reduce tasks of the jobs with the corresponding original job
* in the trace.
*/
public class TestMemEmulForMapsWithCustomHeapMemoryRatio
extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestMemEmulForMapsWithCustomHeapMemoryRatio.class");
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* memory emulation. The {@link Gridmix} should use the following runtime
* parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
* Verify total heap memory usage of the tasks of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithCompressedInputCase1()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.5F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* memory emulation. The {@link Gridmix} should use the following runtime
* parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
* Verify total heap memory usage of tasks of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithUncompressedInputCase2()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("mem_emul_case2");
Assert.assertNotNull("Trace file has not found.", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
"-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.4F"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,106 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs
* with custom progress interval, different input data, submission policies
* and user resolver modes. Verify the total heap usage of map tasks of
* the jobs with the corresponding original job in the trace.
*/
public class TestMemEmulForMapsWithCustomIntrvl extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestMemEmulForMapsWithCustomIntrvl.class");
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* memory emulation with custom progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
* Verify maps total heap memory usage of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithCompressedInputCase3()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("mem_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* memory emulation with custom progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
* Verify maps total heap memory usage of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithUncompressedInputCase4()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("mem_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,104 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import org.junit.Assert;
/**
* Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs
* with default progress interval, different input data, submission policies
* and user resolver modes. Verify the total heap usage of map tasks of the
* jobs with corresponding original job in the trace.
*/
public class TestMemEmulForMapsWithDefaultIntrvl extends GridmixSystemTestCase {
private static final Log LOG =
LogFactory.getLog("TestMemEmulForMapsWithDefaultIntrvl.class");
/**
* Generate compressed input and run {@link Gridmix} by turning on the
* memory emulation with default progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
* Verify maps total heap memory usage of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithCompressedInputCase1()
throws Exception {
final long inputSizeInMB = 1024 * 7;
String tracePath = getTraceFile("mem_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
SubmitterUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
/**
* Generate uncompressed input and run {@link Gridmix} by turning on the
* memory emulation with default progress interval. The {@link Gridmix}
* should use the following runtime parameters while running the jobs.
* Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
* Verify maps total heap memory usage of {@link Gridmix} jobs with
* corresponding original job in the trace.
* @throws Exception - if an error occurs.
*/
@Test
public void testMemoryEmulationForMapsWithUncompressedInputCase2()
throws Exception {
final long inputSizeInMB = cSize * 300;
String tracePath = getTraceFile("mem_emul_case1");
Assert.assertNotNull("Trace file not found!", tracePath);
String [] runtimeValues =
{ "LOADJOB",
RoundRobinUserResolver.class.getName(),
"STRESS",
inputSizeInMB + "m",
"file://" + UtilsForGridmix.getProxyUsersFile(conf),
tracePath};
String [] otherArgs = {
"-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +
GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
"-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
"-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
"-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
}
}

View File

@ -1,285 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.test.system;
import org.apache.hadoop.mapred.gridmix.Gridmix;
import org.apache.hadoop.mapred.gridmix.JobCreator;
import org.apache.hadoop.mapred.gridmix.SleepJob;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*;
/**
* Gridmix system tests configurations.
*/
public class GridMixConfig {
/**
* Gridmix original job id.
*/
public static final String GRIDMIX_ORIGINAL_JOB_ID = Gridmix.ORIGINAL_JOB_ID;
/**
* Gridmix output directory.
*/
public static final String GRIDMIX_OUTPUT_DIR = Gridmix.GRIDMIX_OUT_DIR;
/**
* Gridmix job type (LOADJOB/SLEEPJOB).
*/
public static final String GRIDMIX_JOB_TYPE = JobCreator.GRIDMIX_JOB_TYPE;
/**
* Gridmix submission use queue.
*/
/* The following properties have protected visibility in the Gridmix
package and are not accessible outside it. However, they are required
by the system tests, so they are redefined in this system tests config
file. */
public static final String GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE =
"gridmix.job-submission.use-queue-in-trace";
/**
* Gridmix user resolver(RoundRobinUserResolver/
* SubmitterUserResolver/EchoUserResolver).
*/
public static final String GRIDMIX_USER_RESOLVER = Gridmix.GRIDMIX_USR_RSV;
/**
* Gridmix queue depth.
*/
public static final String GRIDMIX_QUEUE_DEPTH = Gridmix.GRIDMIX_QUE_DEP;
/* The following property has protected visibility in the Gridmix
package and is not accessible outside it. However, it is required
by the system tests, so it is redefined in this system tests config
file. */
/**
* Gridmix generate bytes per file.
*/
public static final String GRIDMIX_BYTES_PER_FILE =
"gridmix.gen.bytes.per.file";
/**
* Gridmix job submission policy(STRESS/REPLAY/SERIAL).
*/
public static final String GRIDMIX_SUBMISSION_POLICY =
"gridmix.job-submission.policy";
/**
* Gridmix minimum file size.
*/
public static final String GRIDMIX_MINIMUM_FILE_SIZE =
"gridmix.min.file.size";
/**
* Gridmix key fraction.
*/
public static final String GRIDMIX_KEY_FRC =
"gridmix.key.fraction";
/**
* Gridmix compression enable
*/
public static final String GRIDMIX_COMPRESSION_ENABLE =
"gridmix.compression-emulation.enable";
/**
* Gridmix distcache enable
*/
public static final String GRIDMIX_DISTCACHE_ENABLE =
"gridmix.distributed-cache-emulation.enable";
/**
* Gridmix input decompression enable.
*/
public static final String GRIDMIX_INPUT_DECOMPRESS_ENABLE =
"gridmix.compression-emulation.input-decompression.enable";
/**
* Gridmix input compression ratio.
*/
public static final String GRIDMIX_INPUT_COMPRESS_RATIO =
"gridmix.compression-emulation.map-input.decompression-ratio";
/**
* Gridmix intermediate compression ratio.
*/
public static final String GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO =
"gridmix.compression-emulation.map-output.compression-ratio";
/**
* Gridmix output compression ratio.
*/
public static final String GRIDMIX_OUTPUT_COMPRESSION_RATIO =
"gridmix.compression-emulation.reduce-output.compression-ratio";
/**
* Gridmix distributed cache visibilities.
*/
public static final String GRIDMIX_DISTCACHE_VISIBILITIES =
MRJobConfig.CACHE_FILE_VISIBILITIES;
/**
* Gridmix distributed cache files.
*/
public static final String GRIDMIX_DISTCACHE_FILES =
MRJobConfig.CACHE_FILES;
/**
* Gridmix distributed cache files size.
*/
public static final String GRIDMIX_DISTCACHE_FILESSIZE =
MRJobConfig.CACHE_FILES_SIZES;
/**
* Gridmix distributed cache files time stamp.
*/
public static final String GRIDMIX_DISTCACHE_TIMESTAMP =
MRJobConfig.CACHE_FILE_TIMESTAMPS;
/**
* Gridmix logger mode.
*/
public static final String GRIDMIX_LOG_MODE =
"log4j.logger.org.apache.hadoop.mapred.gridmix";
/**
* Gridmix sleep job map task only.
*/
public static final String GRIDMIX_SLEEPJOB_MAPTASK_ONLY =
SleepJob.SLEEPJOB_MAPTASK_ONLY;
/**
* Gridmix sleep map maximum time.
*/
public static final String GRIDMIX_SLEEP_MAP_MAX_TIME =
SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME;
/**
* Gridmix sleep reduce maximum time.
*/
public static final String GRIDMIX_SLEEP_REDUCE_MAX_TIME =
SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME;
/**
* Gridmix high ram job emulation enable.
*/
public static final String GRIDMIX_HIGH_RAM_JOB_ENABLE =
"gridmix.highram-emulation.enable";
/**
* Job map memory in mb.
*/
public static final String JOB_MAP_MEMORY_MB =
MRJobConfig.MAP_MEMORY_MB;
/**
* Job reduce memory in mb.
*/
public static final String JOB_REDUCE_MEMORY_MB =
MRJobConfig.REDUCE_MEMORY_MB;
/**
* Cluster map memory in mb.
*/
public static final String CLUSTER_MAP_MEMORY =
MRConfig.MAPMEMORY_MB;
/**
* Cluster reduce memory in mb.
*/
public static final String CLUSTER_REDUCE_MEMORY =
MRConfig.REDUCEMEMORY_MB;
/**
* Cluster maximum map memory.
*/
public static final String CLUSTER_MAX_MAP_MEMORY =
JTConfig.JT_MAX_MAPMEMORY_MB;
/**
* Cluster maximum reduce memory.
*/
public static final String CLUSTER_MAX_REDUCE_MEMORY =
JTConfig.JT_MAX_REDUCEMEMORY_MB;
/**
* Gridmix cpu emulation.
*/
public static final String GRIDMIX_CPU_EMULATON =
ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
/**
* Gridmix cpu usage emulation plugin.
*/
public static final String GRIDMIX_CPU_USAGE_PLUGIN =
CumulativeCpuUsageEmulatorPlugin.class.getName();
/**
* Gridmix cpu emulation custom interval.
*/
public static final String GRIDMIX_CPU_CUSTOM_INTERVAL =
CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL;
/**
* Gridmix cpu emulation lower limit.
*/
public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55;
/**
* Gridmix cpu emulation upper limit.
*/
public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130;
/**
* Gridmix heap memory custom interval
*/
public static final String GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL =
TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL;
/**
* Gridmix heap free memory ratio
*/
public static final String GRIDMIX_HEAP_FREE_MEMORY_RATIO =
TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO;
/**
* Gridmix memory emulation plugin
*/
public static final String GRIDMIX_MEMORY_EMULATION_PLUGIN =
TotalHeapUsageEmulatorPlugin.class.getName();
/**
* Gridmix memory emulation
*/
public static final String GRIDMIX_MEMORY_EMULATON =
ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
/**
* Gridmix memory emulation lower limit.
*/
public static int GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT = 55;
/**
* Gridmix memory emulation upper limit.
*/
public static int GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT = 130;
}
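
As a point of reference, the system tests above consume these keys by passing them to Gridmix as generic "-D key=value" arguments. A minimal sketch, assuming the GridmixSystemTestCase helpers used in those tests (runGridmixAndVerify, SubmitterUserResolver) and a placeholder trace path:

// Minimal sketch, not part of this commit: wiring GridMixConfig keys into a
// Gridmix run the way the system tests above do. The trace path and the
// input size are illustrative placeholders.
String tracePath = "/tmp/sample-trace.json.gz";
String [] runtimeValues = {"LOADJOB",
                           SubmitterUserResolver.class.getName(),
                           "SERIAL",
                           "4096m",
                           tracePath};
String [] otherArgs = {
    "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
    "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"};
runGridmixAndVerify(runtimeValues, otherArgs, tracePath);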

View File

@ -1,34 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.test.system;
/**
* Gridmix run modes.
*
*/
public enum GridMixRunMode {
DATA_GENERATION(1), RUN_GRIDMIX(2), DATA_GENERATION_AND_RUN_GRIDMIX(3);
private final int mode;
GridMixRunMode(int mode) {
this.mode = mode;
}
public int getValue() {
return mode;
}
}
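A small illustrative sketch (not in the original patch) of resolving a numeric run mode back to its enum constant; the helper class and method are hypothetical and assumed to live in the same package as GridMixRunMode.

public class GridMixRunModeExample {
  // Hypothetical helper: maps a numeric run mode to its enum constant.
  static GridMixRunMode fromValue(int value) {
    for (GridMixRunMode mode : GridMixRunMode.values()) {
      if (mode.getValue() == value) {
        return mode;
      }
    }
    throw new IllegalArgumentException("Unknown gridmix run mode: " + value);
  }

  public static void main(String[] args) {
    // Prints DATA_GENERATION_AND_RUN_GRIDMIX.
    System.out.println(fromValue(3));
  }
}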

View File

@ -1,86 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.test.system;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.tools.rumen.ZombieJobProducer;
import org.apache.hadoop.tools.rumen.ZombieJob;
import org.apache.hadoop.conf.Configuration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* Build the job stories with a given trace file.
*/
public class GridmixJobStory {
private static final Log LOG = LogFactory.getLog(GridmixJobStory.class);
private Path path;
private Map<JobID, ZombieJob> zombieJobs;
private Configuration conf;
public GridmixJobStory(Path path, Configuration conf) {
this.path = path;
this.conf = conf;
try {
zombieJobs = buildJobStories();
if (zombieJobs == null) {
throw new NullPointerException(
"No jobs found in the given trace file.");
}
} catch (IOException ioe) {
LOG.warn("Error:" + ioe.getMessage());
} catch (NullPointerException npe) {
LOG.warn("Error:" + npe.getMessage());
}
}
/**
* Get the zombie jobs as a map.
* @return the zombie jobs map.
*/
public Map<JobID, ZombieJob> getZombieJobs() {
return zombieJobs;
}
/**
* Get the zombie job of a given job id.
* @param jobId - gridmix job id.
* @return - the zombie job object.
*/
public ZombieJob getZombieJob(JobID jobId) {
return zombieJobs.get(jobId);
}
private Map<JobID, ZombieJob> buildJobStories() throws IOException {
ZombieJobProducer zjp = new ZombieJobProducer(path, null, conf);
Map<JobID, ZombieJob> hm = new HashMap<JobID, ZombieJob>();
ZombieJob zj = zjp.getNextJob();
while (zj != null) {
hm.put(zj.getJobID(), zj);
zj = zjp.getNextJob();
}
if (hm.size() == 0) {
return null;
} else {
return hm;
}
}
}
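For illustration only (not part of the patch), a sketch of loading a trace and walking the resulting zombie jobs. The trace path is hypothetical, and the example is assumed to sit in the same package as GridmixJobStory.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.rumen.ZombieJob;

public class GridmixJobStoryExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical local trace file following the *_10m.json.gz convention.
    Path trace = new Path("file:///tmp/wordcount_10m.json.gz");
    GridmixJobStory story = new GridmixJobStory(trace, conf);
    // The constructor logs and swallows read errors, so guard against null.
    if (story.getZombieJobs() != null) {
      for (ZombieJob zombie : story.getZombieJobs().values()) {
        // Each zombie job carries the original job id from the trace.
        System.out.println(zombie.getJobID());
      }
    }
  }
}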

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.test.system;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.test.system.JTClient;
import org.junit.Assert;
/**
* Submit the gridmix jobs.
*/
public class GridmixJobSubmission {
private static final Log LOG =
LogFactory.getLog(GridmixJobSubmission.class);
private int gridmixJobCount;
private Configuration conf;
private Path gridmixDir;
private JTClient jtClient;
public GridmixJobSubmission(Configuration conf, JTClient jtClient,
Path gridmixDir) {
this.conf = conf;
this.jtClient = jtClient;
this.gridmixDir = gridmixDir;
}
/**
* Submit the gridmix jobs.
* @param runtimeArgs - gridmix common runtime arguments.
* @param otherArgs - gridmix other runtime arguments.
* @param mode - gridmix run mode.
* @throws Exception
*/
public void submitJobs(String [] runtimeArgs,
String [] otherArgs, int mode) throws Exception {
int prvJobCount = jtClient.getClient().getAllJobs().length;
int exitCode = -1;
if (otherArgs == null) {
exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf,
mode, runtimeArgs);
} else {
exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, mode,
runtimeArgs, otherArgs);
}
Assert.assertEquals("Gridmix jobs have failed.", 0, exitCode);
gridmixJobCount = jtClient.getClient().getAllJobs().length - prvJobCount;
}
/**
* Get the submitted jobs count.
* @return number of jobs submitted for a trace.
*/
public int getGridmixJobCount() {
return gridmixJobCount;
}
/**
* Get the job configuration.
* @return Configuration of a submitted job.
*/
public Configuration getJobConf() {
return conf;
}
}
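A usage sketch (not part of the patch) showing how the submission helper might be driven from a system test. It assumes a live test cluster with an initialized JTClient and the same package as the test-system classes above; the directory, data size and trace path below are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.test.system.JTClient;

public class GridmixJobSubmissionExample {
  public static void runOnce(Configuration conf, JTClient jtClient)
      throws Exception {
    // Hypothetical gridmix working directory on the cluster file system.
    Path gridmixDir = new Path("/user/gridmix-test");
    // Job type, user resolver, submission policy, generate size and trace;
    // the order matches what UtilsForGridmix.getArgsList() expects.
    String[] runtimeValues = {
        "LOADJOB",
        "org.apache.hadoop.mapred.gridmix.SubmitterUserResolver",
        "STRESS",
        "512m",
        "file:///tmp/wordcount_10m.json.gz"};
    GridmixJobSubmission submission =
        new GridmixJobSubmission(conf, jtClient, gridmixDir);
    submission.submitJobs(runtimeValues, null,
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
    System.out.println("Gridmix jobs submitted: "
        + submission.getGridmixJobCount());
  }
}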

View File

@ -1,513 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred.gridmix.test.system;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.mapred.gridmix.Gridmix;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapreduce.JobID;
import java.util.Date;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Arrays;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.io.OutputStream;
import java.util.Set;
import java.util.List;
import java.util.Iterator;
import java.util.Map;
import java.io.File;
import java.io.FileOutputStream;
import org.apache.hadoop.test.system.ProxyUserDefinitions;
import org.apache.hadoop.test.system.ProxyUserDefinitions.GroupsAndHost;
/**
* Gridmix utilities.
*/
public class UtilsForGridmix {
private static final Log LOG = LogFactory.getLog(UtilsForGridmix.class);
private static final Path DEFAULT_TRACES_PATH =
new Path(System.getProperty("user.dir") + "/src/test/system/resources/");
/**
* Clean up the folder or file.
* @param path - folder or file path.
* @param conf - cluster configuration
* @throws IOException - If an I/O error occurs.
*/
public static void cleanup(Path path, Configuration conf)
throws IOException {
FileSystem fs = path.getFileSystem(conf);
fs.delete(path, true);
fs.close();
}
/**
* Get the login user.
* @return - login user as a string.
* @throws IOException - if an I/O error occurs.
*/
public static String getUserName() throws IOException {
return UserGroupInformation.getLoginUser().getUserName();
}
/**
* Get the argument list for gridmix job.
* @param gridmixDir - gridmix parent directory.
* @param gridmixRunMode - gridmix run mode: 1, 2 or 3.
* @param values - gridmix runtime values.
* @param otherArgs - gridmix other generic args.
* @return - argument list as string array.
*/
public static String [] getArgsList(Path gridmixDir, int gridmixRunMode,
String [] values, String [] otherArgs) {
String [] runtimeArgs = {
"-D", GridMixConfig.GRIDMIX_LOG_MODE + "=DEBUG",
"-D", GridMixConfig.GRIDMIX_OUTPUT_DIR + "=gridmix",
"-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
"-D", GridMixConfig.GRIDMIX_JOB_TYPE + "=" + values[0],
"-D", GridMixConfig.GRIDMIX_USER_RESOLVER + "=" + values[1],
"-D", GridMixConfig.GRIDMIX_SUBMISSION_POLICY + "=" + values[2]
};
String [] classArgs;
if ((gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue()
|| gridmixRunMode
== GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue())
&& values[1].indexOf("RoundRobinUserResolver") > 0) {
classArgs = new String[] {
"-generate", values[3],
"-users", values[4],
gridmixDir.toString(),
values[5]
};
} else if (gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue()
|| gridmixRunMode
== GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) {
classArgs = new String[] {
"-generate", values[3],
gridmixDir.toString(),
values[4]
};
} else if (gridmixRunMode == GridMixRunMode.RUN_GRIDMIX.getValue()
&& values[1].indexOf("RoundRobinUserResolver") > 0) {
classArgs = new String[] {
"-users", values[3],
gridmixDir.toString(),
values[4]
};
} else {
classArgs = new String[] {
gridmixDir.toString(), values[3]
};
}
String[] args = new String[runtimeArgs.length
+ classArgs.length + ((otherArgs != null) ? otherArgs.length : 0)];
System.arraycopy(runtimeArgs, 0, args, 0, runtimeArgs.length);
if (otherArgs != null) {
System.arraycopy(otherArgs, 0, args, runtimeArgs.length,
otherArgs.length);
System.arraycopy(classArgs, 0, args, (runtimeArgs.length +
otherArgs.length), classArgs.length);
} else {
System.arraycopy(classArgs, 0, args, runtimeArgs.length,
classArgs.length);
}
return args;
}
/**
* Create a file with specified size in mb.
* @param sizeInMB - file size in mb.
* @param inputDir - input directory.
* @param conf - cluster configuration.
* @throws Exception - if an exception occurs.
*/
public static void createFile(int sizeInMB, Path inputDir,
Configuration conf) throws Exception {
Date d = new Date();
SimpleDateFormat sdf = new SimpleDateFormat("ddMMyy_HHmmssS");
String formatDate = sdf.format(d);
FileSystem fs = inputDir.getFileSystem(conf);
OutputStream out = fs.create(new Path(inputDir,"datafile_" + formatDate));
final byte[] b = new byte[1024 * 1024];
for (int index = 0; index < sizeInMB; index++) {
out.write(b);
}
out.close();
fs.close();
}
/**
* Create directories for a path.
* @param path - directories path.
* @param conf - cluster configuration.
* @throws IOException - if an I/O error occurs.
*/
public static void createDirs(Path path,Configuration conf)
throws IOException {
FileSystem fs = path.getFileSystem(conf);
if (!fs.exists(path)) {
fs.mkdirs(path);
}
}
/**
* Run the Gridmix job with given runtime arguments.
* @param gridmixDir - Gridmix parent directory.
* @param conf - cluster configuration.
* @param gridmixRunMode - gridmix run mode: 1, 2 or 3.
* @param runtimeValues -gridmix runtime values.
* @return - gridmix status either 0 or 1.
* @throws Exception
*/
public static int runGridmixJob(Path gridmixDir, Configuration conf,
int gridmixRunMode, String [] runtimeValues) throws Exception {
return runGridmixJob(gridmixDir, conf, gridmixRunMode, runtimeValues, null);
}
/**
* Run the Gridmix job with given runtime arguments.
* @param gridmixDir - Gridmix parent directory
* @param conf - cluster configuration.
* @param gridmixRunMode - gridmix run mode.
* @param runtimeValues - gridmix runtime values.
* @param otherArgs - gridmix other generic args.
* @return - gridmix status either 0 or 1.
* @throws Exception
*/
public static int runGridmixJob(Path gridmixDir, Configuration conf,
int gridmixRunMode, String [] runtimeValues,
String [] otherArgs) throws Exception {
Path outputDir = new Path(gridmixDir, "gridmix");
Path inputDir = new Path(gridmixDir, "input");
LOG.info("Cleanup the data if data already exists.");
String modeName = "";
switch (gridmixRunMode) {
case 1 :
cleanup(inputDir, conf);
cleanup(outputDir, conf);
modeName = GridMixRunMode.DATA_GENERATION.name();
break;
case 2 :
cleanup(outputDir, conf);
modeName = GridMixRunMode.RUN_GRIDMIX.name();
break;
case 3 :
cleanup(inputDir, conf);
cleanup(outputDir, conf);
modeName = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.name();
break;
}
final String [] args =
UtilsForGridmix.getArgsList(gridmixDir, gridmixRunMode,
runtimeValues, otherArgs);
Gridmix gridmix = new Gridmix();
LOG.info("Submit a Gridmix job in " + runtimeValues[1]
+ " mode for " + modeName);
int exitCode = ToolRunner.run(conf, gridmix, args);
return exitCode;
}
/**
* Get the proxy users file.
* @param conf - cluster configuration.
* @return String - proxy users file.
* @throws Exception - if no proxy users are found in the configuration.
*/
public static String getProxyUsersFile(Configuration conf)
throws Exception {
ProxyUserDefinitions pud = getProxyUsersData(conf);
String fileName = buildProxyUsersFile(pud.getProxyUsers());
if (fileName == null) {
LOG.error("Proxy users file not found.");
throw new Exception("Proxy users file not found.");
} else {
return fileName;
}
}
/**
* List the job ids of the most recently executed gridmix jobs.
* @param client - job client.
* @param execJobCount - number of executed jobs.
* @return - list of gridmix job ids, or null if none were found.
*/
public static List<JobID> listGridmixJobIDs(JobClient client,
int execJobCount) throws IOException {
List<JobID> jobids = new ArrayList<JobID>();
JobStatus [] jobStatus = client.getAllJobs();
int numJobs = jobStatus.length;
for (int index = 1; index <= execJobCount; index++) {
JobStatus js = jobStatus[numJobs - index];
JobID jobid = js.getJobID();
String jobName = js.getJobName();
if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") &&
!jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) {
jobids.add(jobid);
}
}
return (jobids.size() == 0)? null : jobids;
}
/**
* List the proxy users other than the given login user.
* @param conf - cluster configuration.
* @param loginUser - login user name to exclude.
* @return - list of proxy user names.
* @throws Exception - if no proxy users are found in the configuration.
*/
public static List<String> listProxyUsers(Configuration conf,
String loginUser) throws Exception {
List<String> proxyUsers = new ArrayList<String>();
ProxyUserDefinitions pud = getProxyUsersData(conf);
Map<String, GroupsAndHost> usersData = pud.getProxyUsers();
Collection<String> users = usersData.keySet();
Iterator<String> itr = users.iterator();
while (itr.hasNext()) {
String user = itr.next();
if (!user.equals(loginUser)) {
proxyUsers.add(user);
}
}
return proxyUsers;
}
private static String buildProxyUsersFile(final Map<String, GroupsAndHost>
proxyUserData) throws Exception {
FileOutputStream fos = null;
File file = null;
StringBuilder input = new StringBuilder();
Set<String> users = proxyUserData.keySet();
Iterator<String> itr = users.iterator();
while (itr.hasNext()) {
String user = itr.next();
if (!user.equals(
UserGroupInformation.getLoginUser().getShortUserName())) {
input.append(user);
final GroupsAndHost gah = proxyUserData.get(user);
final List <String> groups = gah.getGroups();
for (String group : groups) {
input.append(",");
input.append(group);
}
input.append("\n");
}
}
if (input.length() > 0) {
try {
file = File.createTempFile("proxyusers", null);
fos = new FileOutputStream(file);
fos.write(input.toString().getBytes());
} catch (IOException ioexp) {
LOG.warn(ioexp.getMessage());
return null;
} finally {
// Guard against a NullPointerException when the temp file or stream
// was never created.
if (fos != null) {
fos.close();
}
if (file != null) {
file.deleteOnExit();
}
}
LOG.info("Created proxy users file: " + file.toString());
return file.toString();
} else {
return null;
}
}
private static ProxyUserDefinitions getProxyUsersData(Configuration conf)
throws Exception {
Iterator<Map.Entry<String, String>> itr = conf.iterator();
List<String> proxyUsersData = new ArrayList<String>();
while (itr.hasNext()) {
String property = itr.next().toString();
if (property.indexOf("hadoop.proxyuser") >= 0
&& property.indexOf("groups=") >= 0) {
proxyUsersData.add(property.split("\\.")[2]);
}
}
if (proxyUsersData.size() == 0) {
LOG.error("No proxy users found in the configuration.");
throw new Exception("No proxy users found in the configuration.");
}
ProxyUserDefinitions pud = new ProxyUserDefinitions() {
public boolean writeToFile(URI filePath) throws IOException {
throw new UnsupportedOperationException("No such method exists.");
};
};
for (String userName : proxyUsersData) {
// Groups and hosts are comma-separated lists in the configuration.
List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser."
+ userName + ".groups").split(","));
List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser."
+ userName + ".hosts").split(","));
ProxyUserDefinitions.GroupsAndHost definitions =
pud.new GroupsAndHost();
definitions.setGroups(groups);
definitions.setHosts(hosts);
pud.addProxyUser(userName, definitions);
}
return pud;
}
/**
* Gives the list of paths for MR traces against different time
* intervals. It fetches only the paths that follow the file naming
* convention &lt;FileName&gt;_&lt;TimeInterval&gt;.json.gz, where the
* time interval uses the syntax &lt;numeric&gt;[m|h|d],
* e.g. 10m for a 10 minute trace, 1h for a 1 hour trace and
* 1d for a 1 day trace.
*
* @param conf - cluster configuration.
* @return - list of MR paths as key/value pair based on time interval.
* @throws IOException - if an I/O error occurs.
*/
public static Map<String, String> getMRTraces(Configuration conf)
throws IOException {
return getMRTraces(conf, DEFAULT_TRACES_PATH);
}
/**
* Gives the list of paths for MR traces against different time
* intervals. It fetches only the paths that follow the file naming
* convention &lt;FileName&gt;_&lt;TimeInterval&gt;.json.gz, where the
* time interval uses the syntax &lt;numeric&gt;[m|h|d],
* e.g. 10m for a 10 minute trace, 1h for a 1 hour trace and
* 1d for a 1 day trace.
*
* @param conf - cluster configuration object.
* @param tracesPath - MR traces path.
* @return - list of MR paths as key/value pair based on time interval.
* @throws IOException - If an I/O error occurs.
*/
public static Map<String, String> getMRTraces(Configuration conf,
Path tracesPath) throws IOException {
Map<String, String> jobTraces = new HashMap<String, String>();
final FileSystem fs = FileSystem.getLocal(conf);
final FileStatus fstat[] = fs.listStatus(tracesPath);
for (FileStatus fst : fstat) {
final String fileName = fst.getPath().getName();
if (fileName.endsWith("m.json.gz")
|| fileName.endsWith("h.json.gz")
|| fileName.endsWith("d.json.gz")) {
jobTraces.put(fileName.substring(fileName.indexOf("_") + 1,
fileName.indexOf(".json.gz")), fst.getPath().toString());
}
}
if (jobTraces.size() == 0) {
LOG.error("No traces found in " + tracesPath.toString() + " path.");
throw new IOException("No traces found in "
+ tracesPath.toString() + " path.");
}
return jobTraces;
}
/**
* Lists all the MR trace paths irrespective of time interval.
* @param conf - cluster configuration.
* @param tracesPath - MR traces path
* @return - MR paths as a list.
* @throws IOException - if an I/O error occurs.
*/
public static List<String> listMRTraces(Configuration conf,
Path tracesPath) throws IOException {
List<String> jobTraces = new ArrayList<String>();
final FileSystem fs = FileSystem.getLocal(conf);
final FileStatus fstat[] = fs.listStatus(tracesPath);
for (FileStatus fst : fstat) {
jobTraces.add(fst.getPath().toString());
}
if (jobTraces.size() == 0) {
LOG.error("No traces found in " + tracesPath.toString() + " path.");
throw new IOException("No traces found in "
+ tracesPath.toString() + " path.");
}
return jobTraces;
}
/**
* Lists all the MR trace paths, irrespective of time interval, under the
* default traces location.
* @param conf - cluster configuration.
* @return - MR paths as a list.
* @throws IOException - if an I/O error occurs.
*/
public static List<String> listMRTraces(Configuration conf)
throws IOException {
return listMRTraces(conf, DEFAULT_TRACES_PATH);
}
/**
* Gives the list of MR traces for a given time interval.
* The time interval should follow the convention
* &lt;numeric&gt;[m|h|d], e.g. 10m, 1h or 2d.
* @param conf - cluster configuration
* @param timeInterval - trace time interval.
* @param tracesPath - MR traces Path.
* @return - MR paths as a list for a given time interval.
* @throws IOException - If an I/O error occurs.
*/
public static List<String> listMRTracesByTime(Configuration conf,
String timeInterval, Path tracesPath) throws IOException {
List<String> jobTraces = new ArrayList<String>();
final FileSystem fs = FileSystem.getLocal(conf);
final FileStatus fstat[] = fs.listStatus(tracesPath);
for (FileStatus fst : fstat) {
final String fileName = fst.getPath().getName();
if (fileName.indexOf(timeInterval) >= 0) {
jobTraces.add(fst.getPath().toString());
}
}
return jobTraces;
}
/**
* Gives the list of MR traces for a given time interval.
* The time interval should follow the convention
* &lt;numeric&gt;[m|h|d], e.g. 10m, 1h or 2d.
* @param conf - cluster configuration
* @param timeInterval - trace time interval.
* @return - MR paths as a list for a given time interval.
* @throws IOException - If an I/O error occurs.
*/
public static List<String> listMRTracesByTime(Configuration conf,
String timeInterval) throws IOException {
return listMRTracesByTime(conf, timeInterval, DEFAULT_TRACES_PATH);
}
}
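Finally, a local-only sketch (not part of the patch) of listing trace files with the utilities above. The traces directory is hypothetical and must contain files following the <name>_<interval>.json.gz convention; the example assumes the same package as UtilsForGridmix.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class UtilsForGridmixExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical local directory holding *_10m.json.gz style traces.
    Path tracesDir = new Path("src/test/system/resources");
    Map<String, String> traces = UtilsForGridmix.getMRTraces(conf, tracesDir);
    for (Map.Entry<String, String> entry : traces.entrySet()) {
      // Key is the time interval (e.g. 10m); value is the trace path.
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}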

Some files were not shown because too many files have changed in this diff.