HADOOP-6332. Large-scale Automated Test Framework. Contributed by Sharad Agarwal, Sreekanth Ramakrishnan, Konstantin Boudnik, et al.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@944521 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Boudnik 2010-05-14 23:56:34 +00:00
parent 1e15cf0355
commit 69693b6a86
26 changed files with 8385 additions and 77 deletions


@@ -125,6 +125,9 @@ Release 0.21.0 - Unreleased
NEW FEATURES
HADOOP-6332. Large-scale Automated Test Framework. (sharad, Sreekanth
Ramakrishnan, et al. via cos)
HADOOP-4268. Change fsck to use ClientProtocol methods so that the
corresponding permission requirement for running the ClientProtocol
methods will be enforced. (szetszwo)

build.xml

@@ -92,6 +92,7 @@
<property name="test.core.build.classes" value="${test.build.dir}/core/classes"/>
<property name="test.all.tests.file" value="${test.src.dir}/all-tests"/>
<property name="javadoc.link.java"
value="http://java.sun.com/javase/6/docs/api/"/>
@@ -565,6 +566,7 @@
description="Make hadoop-fi.jar">
<macro-jar-fault-inject
target.name="jar"
build.dir="${build-fi.dir}"
jar.final.name="final.name"
jar.final.value="${final.name}-fi" />
</target>
@@ -618,71 +620,96 @@
<!-- ================================================================== -->
<!-- Run unit tests -->
<!-- ================================================================== -->
<target name="run-test-core" depends="compile-core-test" description="Run core unit tests">
<macrodef name="macro-test-runner">
<attribute name="test.file" />
<attribute name="classpath" />
<attribute name="test.dir" />
<attribute name="fileset.dir" />
<attribute name="hadoop.conf.dir.deployed" default="" />
<sequential>
<delete file="${test.build.dir}/testsfailed"/>
<delete dir="@{test.dir}/data" />
<mkdir dir="@{test.dir}/data" />
<delete dir="@{test.dir}/logs" />
<mkdir dir="@{test.dir}/logs" />
<copy file="${test.src.dir}/hadoop-policy.xml"
todir="@{test.dir}/extraconf" />
<copy file="${test.src.dir}/fi-site.xml"
todir="@{test.dir}/extraconf" />
<junit showoutput="${test.output}"
printsummary="${test.junit.printsummary}"
haltonfailure="${test.junit.haltonfailure}"
fork="yes"
forkmode="${test.junit.fork.mode}"
maxmemory="${test.junit.maxmemory}"
dir="${basedir}"
timeout="${test.timeout}"
errorProperty="tests.failed"
failureProperty="tests.failed">
<jvmarg value="-ea" />
<sysproperty key="test.build.data" value="${test.build.data}" />
<sysproperty key="test.cache.data" value="${test.cache.data}" />
<sysproperty key="test.debug.data" value="${test.debug.data}" />
<sysproperty key="hadoop.log.dir" value="${test.log.dir}" />
<sysproperty key="test.src.dir" value="${test.src.dir}" />
<sysproperty key="test.build.extraconf" value="@{test.dir}/extraconf" />
<sysproperty key="hadoop.policy.file" value="hadoop-policy.xml" />
<sysproperty key="java.library.path"
value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
<sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
<!-- set io.compression.codec.lzo.class in the child jvm only if it is set -->
<syspropertyset dynamic="no">
<propertyref name="io.compression.codec.lzo.class"/>
</syspropertyset>
<!-- set compile.c++ in the child jvm only if it is set -->
<syspropertyset dynamic="no">
<propertyref name="compile.c++"/>
</syspropertyset>
<classpath refid="@{classpath}" />
<!-- Pass probability specifications to the spawn JVM -->
<syspropertyset id="FaultProbabilityProperties">
<propertyref regex="fi.*"/>
</syspropertyset>
<sysproperty key="test.system.hdrc.deployed.hadoopconfdir"
value="@{hadoop.conf.dir.deployed}" />
<formatter type="${test.junit.output.format}" />
<batchtest todir="@{test.dir}" if="tests.notestcase">
<fileset dir="@{fileset.dir}/core"
excludes="**/${test.exclude}.java aop/** system/**">
<patternset>
<includesfile name="@{test.file}"/>
</patternset>
</fileset>
</batchtest>
<batchtest todir="${test.build.dir}" if="tests.notestcase.fi">
<fileset dir="@{fileset.dir}/aop"
includes="**/${test.include}.java"
excludes="**/${test.exclude}.java" />
</batchtest>
<batchtest todir="@{test.dir}" if="tests.testcase">
<fileset dir="@{fileset.dir}/core"
includes="**/${testcase}.java" excludes="aop/** system/**"/>
</batchtest>
<batchtest todir="${test.build.dir}" if="tests.testcase.fi">
<fileset dir="@{fileset.dir}/aop" includes="**/${testcase}.java" />
</batchtest>
<!--The following batch is for very special occasions only when
non-FI tests need to be executed against the FI environment -->
<batchtest todir="${test.build.dir}" if="tests.testcaseonly">
<fileset dir="@{fileset.dir}/core" includes="**/${testcase}.java" />
</batchtest>
</junit>
<antcall target="checkfailure"/>
</sequential>
</macrodef>
<delete dir="${test.build.data}"/>
<mkdir dir="${test.build.data}"/>
<delete dir="${test.log.dir}"/>
<mkdir dir="${test.log.dir}"/>
<copy file="${test.src.dir}/hadoop-policy.xml"
todir="${test.build.extraconf}" />
<copy file="${test.src.dir}/fi-site.xml"
todir="${test.build.extraconf}" />
<junit showoutput="${test.output}"
printsummary="${test.junit.printsummary}"
haltonfailure="${test.junit.haltonfailure}"
fork="yes"
forkmode="${test.junit.fork.mode}"
maxmemory="${test.junit.maxmemory}"
dir="${basedir}" timeout="${test.timeout}"
errorProperty="tests.failed" failureProperty="tests.failed">
<jvmarg value="-ea" />
<sysproperty key="test.build.data" value="${test.build.data}"/>
<sysproperty key="test.cache.data" value="${test.cache.data}"/>
<sysproperty key="test.debug.data" value="${test.debug.data}"/>
<sysproperty key="hadoop.log.dir" value="${test.log.dir}"/>
<sysproperty key="test.src.dir" value="${test.src.dir}/core"/>
<sysproperty key="test.build.extraconf" value="${test.build.extraconf}" />
<sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/>
<sysproperty key="java.library.path"
value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
<sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
<!-- set io.compression.codec.lzo.class in the child jvm only if it is set -->
<syspropertyset dynamic="no">
<propertyref name="io.compression.codec.lzo.class"/>
</syspropertyset>
<!-- set compile.c++ in the child jvm only if it is set -->
<syspropertyset dynamic="no">
<propertyref name="compile.c++"/>
</syspropertyset>
<classpath refid="test.classpath"/>
<syspropertyset id="FaultProbabilityProperties">
<propertyref regex="fi.*"/>
</syspropertyset>
<formatter type="${test.junit.output.format}" />
<batchtest todir="${test.build.dir}" if="tests.notestcase">
<fileset dir="${test.src.dir}/core"
includes="**/${test.include}.java"
excludes="**/${test.exclude}.java" />
</batchtest>
<batchtest todir="${test.build.dir}" if="tests.notestcase.fi">
<fileset dir="${test.src.dir}/aop"
includes="**/${test.include}.java"
excludes="**/${test.exclude}.java" />
</batchtest>
<batchtest todir="${test.build.dir}" if="tests.testcase">
<fileset dir="${test.src.dir}/core" includes="**/${testcase}.java"/>
</batchtest>
<batchtest todir="${test.build.dir}" if="tests.testcase.fi">
<fileset dir="${test.src.dir}/aop" includes="**/${testcase}.java"/>
</batchtest>
<!--The following batch is for very special occasions only when
non-FI tests need to be executed against the FI environment -->
<batchtest todir="${test.build.dir}" if="tests.testcaseonly">
<fileset dir="${test.src.dir}/core" includes="**/${testcase}.java"/>
</batchtest>
</junit>
<antcall target="checkfailure"/>
<target name="run-test-core" depends="compile-core-test" description="Run core unit tests">
<macro-test-runner test.file="${test.all.tests.file}"
classpath="${test.classpath.id}"
test.dir="${test.build.dir}"
fileset.dir="${test.src.dir}"
>
</macro-test-runner>
</target>
<target name="checkfailure" if="tests.failed">
@@ -1175,7 +1202,8 @@
classpathref="mvn-ant-task.classpath"/>
</target>
<target name="mvn-install" depends="mvn-taskdef,jar,jar-test,set-version"
<target name="mvn-install" depends="mvn-taskdef,jar,jar-test,
-mvn-system-install,set-version"
description="To install hadoop core and test jars to local filesystem's m2 cache">
<artifact:pom file="${hadoop-core.pom}" id="hadoop.core"/>
<artifact:pom file="${hadoop-core-test.pom}" id="hadoop.core.test"/>


@@ -0,0 +1,127 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core</artifactId>
<packaging>jar</packaging>
<version>@version</version>
<dependencies>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.2</version>
</dependency>
<dependency>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
<version>0.52</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.0.1</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.4</version>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>1.4.1</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
<version>5.5.12</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
<version>5.5.12</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
<version>0.7.1</version>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>1.4.1</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
<version>0.3</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.5</version>
</dependency>
<dependency>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
<version>1.8.0.10</version>
</dependency>
<dependency>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
<version>2.0.8</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>avro</artifactId>
<version>1.3.0</version>
</dependency>
</dependencies>
</project>

ivy/hadoop-core-system.pom

@@ -0,0 +1,258 @@
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-system</artifactId>
<packaging>jar</packaging>
<version>${hadoop.version}</version>
<description>
Hadoop is the distributed computing framework of Apache;
hadoop-core-system contains shared classes of the embedded
Hadoop test framework for system testing.
</description>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>http://apache.org/licenses/LICENSE-2.0</url>
</license>
</licenses>
<dependencies>
<!-- always include commons-logging and log4J -->
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>avalon-framework</groupId>
<artifactId>avalon-framework</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</exclusion>
<exclusion>
<groupId>logkit</groupId>
<artifactId>logkit</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<version>${log4j.version}</version>
<scope>optional</scope>
<exclusions>
<exclusion>
<groupId>javax.mail</groupId>
<artifactId>mail</artifactId>
</exclusion>
<exclusion>
<groupId>javax.jms</groupId>
<artifactId>jms</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jdmk</groupId>
<artifactId>jmxtools</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jmx</groupId>
<artifactId>jmxri</artifactId>
</exclusion>
</exclusions>
</dependency>
<!--SLF4J is a JAR-based dependency; this POM binds it to log4J-->
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>${slf4j-api.version}</version>
<scope>optional</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<version>${slf4j-log4j12.version}</version>
<scope>optional</scope>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<!--Httpclient and its components are optional-->
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.1</version>
<scope>optional</scope>
<exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.3</version>
<scope>optional</scope>
</dependency>
<!--CLI is needed to scan the command line, but only the 1.0 branch is released -->
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>2.0-20070823</version>
<scope>optional</scope>
</dependency>
<!-- this is used for the ftp:// filesystem-->
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>1.4.1</version>
<scope>optional</scope>
</dependency>
<!-- Jetty is used to serve up the application. It is marked as optional because
clients do not need it. All server-side deployments will need
all of these files.-->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
<version>${servlet-api.version}</version>
<scope>optional</scope>
</dependency>
<dependency>
<groupId>jetty</groupId>
<artifactId>org.mortbay.jetty</artifactId>
<version>${jetty.version}</version>
<scope>optional</scope>
</dependency>
<!--JSP support -->
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>${jetty.version}</version>
<scope>optional</scope>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
<version>${jetty.version}</version>
<scope>optional</scope>
</dependency>
<dependency>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
<version>${commons-el.version}</version>
<scope>optional</scope>
</dependency>
<!--JSPC assistance-->
<dependency>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
<version>${core.version}</version>
<scope>optional</scope>
</dependency>
<dependency>
<groupId>org.apache.ant</groupId>
<artifactId>ant</artifactId>
<version>${apacheant.version}</version>
<scope>optional</scope>
</dependency>
<!-- JetS3t is a client library for S3.
-It is only needed if you want to work with S3 filesystems
-It pulls in commons-logging 1.1.1 and does not exclude all the cruft that comes with it.
By excluding it we stay in control of versions and dependencies
-->
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
<version>${jets3t.version}</version>
<scope>optional</scope>
<exclusions>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</exclusion>
</exclusions>
</dependency>
<!--Kosmos filesystem
http://kosmosfs.sourceforge.net/
This is not in the central repository
-->
<!--
<dependency>
<groupId>org.kosmix</groupId>
<artifactId>kfs</artifactId>
<version>0.1</version>
<scope>optional</scope>
</dependency>
-->
<!--
http://xmlenc.sourceforge.net/
"The xmlenc library is a fast stream-based XML output library for Java."
-->
<dependency>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
<version>0.52</version>
<scope>optional</scope>
</dependency>
</dependencies>
</project>

ivy/hadoop-core-system.xml

@@ -0,0 +1,127 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-core-system</artifactId>
<packaging>jar</packaging>
<version>0.22.0-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.2</version>
</dependency>
<dependency>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
<version>0.52</version>
</dependency>
<dependency>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
<version>3.0.1</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.4</version>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>1.4.1</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
<version>5.5.12</version>
</dependency>
<dependency>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
<version>5.5.12</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>commons-el</groupId>
<artifactId>commons-el</artifactId>
<version>1.0</version>
</dependency>
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
<version>0.7.1</version>
</dependency>
<dependency>
<groupId>commons-net</groupId>
<artifactId>commons-net</artifactId>
<version>1.4.1</version>
</dependency>
<dependency>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
<version>6.1.14</version>
</dependency>
<dependency>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
<version>0.3</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.5</version>
</dependency>
<dependency>
<groupId>hsqldb</groupId>
<artifactId>hsqldb</artifactId>
<version>1.8.0.10</version>
</dependency>
<dependency>
<groupId>oro</groupId>
<artifactId>oro</artifactId>
<version>2.0.8</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>avro</artifactId>
<version>1.3.0</version>
</dependency>
</dependencies>
</project>

src/test/all-tests

@@ -0,0 +1 @@
**/Test*.java


@@ -14,13 +14,27 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<project name="aspects">
<project name="aspects"
xmlns:artifact="urn:maven-artifact-ant">
<!-- Properties common for all fault injections -->
<property name="build-fi.dir" value="${basedir}/build-fi"/>
<property name="hadoop-fi.jar" location="${build.dir}/${final.name}-fi.jar" />
<property name="compile-inject.output" value="${build-fi.dir}/compile-fi.log"/>
<property name="aspectversion" value="1.6.5"/>
<property file="${basedir}/build.properties"/>
<!-- Properties related to system fault injection and tests -->
<property name="system-test-build-dir" value="${build-fi.dir}/system"/>
<!-- Properties specifically for system fault-injections and system tests -->
<property name="hadoop-core-system.pom"
location="${ivy.dir}/hadoop-core-system.xml" />
<property name="hadoop-core-system.jar"
location="${system-test-build-dir}/${final.name}-system.jar" />
<property name="hadoop-core-system-sources.jar"
location="${system-test-build-dir}/${final.name}-system-sources.jar" />
<!--All Fault Injection (FI) related targets are located in this section -->
<target name="clean-fi">
@@ -46,15 +60,19 @@
<echo message="Start weaving aspects in place"/>
<iajc
encoding="${build.encoding}"
srcdir="${java.src.dir};${build.src};${test.src.dir}/aop"
includes="org/apache/hadoop/**/*.java, org/apache/hadoop/**/*.aj"
srcdir="${java.src.dir};${build.src};${src.dir.path}"
includes="**/org/apache/hadoop/**/*.java, **/org/apache/hadoop/**/*.aj"
excludes="org/apache/hadoop/classification/tools/**/*, org/apache/hadoop/record/**/*"
destDir="${build.classes}"
destDir="${dest.dir}"
debug="${javac.debug}"
target="${javac.version}"
source="${javac.version}"
deprecation="${javac.deprecation}">
<classpath refid="test.classpath"/>
<classpath>
<path refid="test.classpath"/>
</classpath>
</iajc>
<loadfile property="injection.failure" srcfile="${compile-inject.output}">
<filterchain>
@@ -69,15 +87,76 @@
<echo message="Weaving of aspects is finished"/>
</target>
<target name="injectfaults"
description="Instrument classes with faults and other AOP advices">
<!-- Classpath for running system tests -->
<path id="test.system.classpath">
<pathelement location="${hadoop.conf.dir.deployed}" />
<pathelement location="${system-test-build-dir}/test/extraconf" />
<pathelement location="${system-test-build-dir}/test/classes" />
<pathelement location="${system-test-build-dir}/classes" />
<pathelement location="${test.src.dir}" />
<pathelement location="${build-fi.dir}" />
<pathelement location="${build-fi.dir}/tools" />
<pathelement path="${clover.jar}" />
<fileset dir="${test.lib.dir}">
<include name="**/*.jar" />
<exclude name="**/excluded/" />
</fileset>
<fileset dir="${system-test-build-dir}">
<include name="**/*.jar" />
<exclude name="**/excluded/" />
</fileset>
<path refid="classpath" />
</path>
<target name="injectfaults"
description="Instrument classes with faults and other AOP advices">
<!--mkdir to prevent <subant> failure in case the folder has been removed-->
<mkdir dir="${build-fi.dir}"/>
<delete file="${compile-inject.output}"/>
<subant buildpath="${basedir}" target="compile-fault-inject"
output="${compile-inject.output}">
<property name="build.dir" value="${build-fi.dir}"/>
</subant>
<weave-injectfault-aspects dest.dir="${build-fi.dir}/classes"
src.dir="${test.src.dir}/aop">
</weave-injectfault-aspects>
</target>
<!-- =============================================================== -->
<!-- Create hadoop-{version}-dev-core.jar required to be deployed on -->
<!-- cluster for system tests -->
<!-- =============================================================== -->
<target name="jar-system"
depends="inject-system-faults"
description="make hadoop.jar">
<macro-jar-fault-inject target.name="jar"
build.dir="${system-test-build-dir}"
jar.final.name="final.name"
jar.final.value="${final.name}-system">
</macro-jar-fault-inject>
<jar jarfile="${system-test-build-dir}/${final.name}-system-sources.jar"
update="yes">
<fileset dir="${test.src.dir}/system/java" includes="org/apache/hadoop/**/*.java"/>
<fileset dir="${test.src.dir}/system/aop" includes="org/apache/hadoop/**/*.aj"/>
</jar>
</target>
<macrodef name="weave-injectfault-aspects">
<attribute name="dest.dir" />
<attribute name="src.dir" />
<sequential>
<subant buildpath="build.xml" target="compile-fault-inject"
output="${compile-inject.output}">
<property name="build.dir" value="${build-fi.dir}" />
<property name="src.dir.path" value="@{src.dir}" />
<property name="dest.dir" value="@{dest.dir}" />
</subant>
</sequential>
</macrodef>
<target name="inject-system-faults" description="Inject system faults">
<property name="build-fi.dir" value="${system-test-build-dir}" />
<mkdir dir="${build-fi.dir}"/>
<delete file="${compile-inject.output}"/>
<weave-injectfault-aspects dest.dir="${system-test-build-dir}/classes"
src.dir="${test.src.dir}/system">
</weave-injectfault-aspects>
</target>
<macrodef name="macro-run-tests-fault-inject">
@@ -99,11 +178,12 @@
<!-- ================================================================== -->
<macrodef name="macro-jar-fault-inject">
<attribute name="target.name" />
<attribute name="build.dir" />
<attribute name="jar.final.name" />
<attribute name="jar.final.value" />
<sequential>
<subant buildpath="build.xml" target="@{target.name}">
<property name="build.dir" value="${build-fi.dir}"/>
<property name="build.dir" value="@{build.dir}"/>
<property name="@{jar.final.name}" value="@{jar.final.value}"/>
<property name="jar.extra.properties.list"
value="${test.src.dir}/fi-site.xml" />
@@ -129,4 +209,53 @@
</macrodef>
<!--End of Fault Injection (FI) related section-->
<!-- Start of cluster controller binary target -->
<property name="runAs.src"
value ="${test.src.dir}/system/c++/runAs"/>
<property name="runAs.build.dir"
value="${system-test-build-dir}/c++-build"/>
<property name="runAs.configure.script"
value="${runAs.build.dir}/configure"/>
<target name="init-runAs-build">
<condition property="runAs.parameters.passed">
<not>
<equals arg1="${run-as.hadoop.home.dir}"
arg2="$${run-as.hadoop.home.dir}"/>
</not>
</condition>
<fail unless="runAs.parameters.passed"
message="Required parameters run-as.hadoop.home.dir not passed to the build"/>
<mkdir dir="${runAs.build.dir}"/>
<copy todir="${runAs.build.dir}" overwrite="true">
<fileset dir="${runAs.src}" includes="**/*"/>
</copy>
<chmod perm="+x" file="${runAs.configure.script}">
</chmod>
</target>
<target name="configure-runAs"
depends="init-runAs-build">
<exec executable="${runAs.configure.script}"
dir="${runAs.build.dir}" failonerror="true">
<arg value="--with-home=${run-as.hadoop.home.dir}"/>
</exec>
</target>
<target name="run-as" depends="configure-runAs">
<exec executable="${make.cmd}" dir="${runAs.build.dir}"
searchpath="yes" failonerror="yes">
<arg value="all" />
</exec>
</target>
<!-- End of cluster controller binary target -->
<!-- Maven -->
<target name="-mvn-system-install" depends="mvn-taskdef, jar-system">
<artifact:pom file="${hadoop-core-system.pom}" id="hadoop.core.system"/>
<artifact:install file="${hadoop-core-system.jar}">
<pom refid="hadoop.core.system"/>
<attach file="${hadoop-core-system-sources.jar}" classifier="sources" />
</artifact:install>
</target>
</project>


@@ -0,0 +1,287 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.util.Properties;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
/**
* Default DaemonProtocolAspect which is used to provide default implementation
* for all the common daemon methods. If a daemon requires more specialized
version of a method, it is the responsibility of the DaemonClient to introduce the
* same in woven classes.
*
*/
public aspect DaemonProtocolAspect {
private boolean DaemonProtocol.ready;
@SuppressWarnings("unchecked")
private HashMap<Object, List<ControlAction>> DaemonProtocol.actions =
new HashMap<Object, List<ControlAction>>();
private static final Log LOG = LogFactory.getLog(
DaemonProtocolAspect.class.getName());
/**
* Sets whether the daemon process is ready or not. A concrete daemon protocol should
* implement pointcuts to determine when the daemon is ready and use the
* setter to set the ready state.
*
* @param ready
* true if the Daemon is ready.
*/
public void DaemonProtocol.setReady(boolean ready) {
this.ready = ready;
}
/**
* Checks if the daemon process is alive or not.
*
* @throws IOException
* if daemon is not alive.
*/
public void DaemonProtocol.ping() throws IOException {
}
/**
* Checks if the daemon process is ready to accept RPC connections after it
* finishes initialization. <br/>
*
* @return true if ready to accept connection.
*
* @throws IOException
*/
public boolean DaemonProtocol.isReady() throws IOException {
return ready;
}
/**
* Returns the process related information regarding the daemon process. <br/>
*
* @return process information.
* @throws IOException
*/
public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException {
int activeThreadCount = Thread.activeCount();
long currentTime = System.currentTimeMillis();
long maxmem = Runtime.getRuntime().maxMemory();
long freemem = Runtime.getRuntime().freeMemory();
long totalmem = Runtime.getRuntime().totalMemory();
Map<String, String> envMap = System.getenv();
Properties sysProps = System.getProperties();
Map<String, String> props = new HashMap<String, String>();
for (Map.Entry entry : sysProps.entrySet()) {
props.put((String) entry.getKey(), (String) entry.getValue());
}
ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime,
freemem, maxmem, totalmem, envMap, props);
return info;
}
public void DaemonProtocol.enable(List<Enum<?>> faults) throws IOException {
}
public void DaemonProtocol.disableAll() throws IOException {
}
public abstract Configuration DaemonProtocol.getDaemonConf()
throws IOException;
public FileStatus DaemonProtocol.getFileStatus(String path, boolean local)
throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
p.makeQualified(fs);
FileStatus fileStatus = fs.getFileStatus(p);
return cloneFileStatus(fileStatus);
}
public FileStatus[] DaemonProtocol.listStatus(String path, boolean local)
throws IOException {
Path p = new Path(path);
FileSystem fs = getFS(p, local);
FileStatus[] status = fs.listStatus(p);
if (status != null) {
FileStatus[] result = new FileStatus[status.length];
int i = 0;
for (FileStatus fileStatus : status) {
result[i++] = cloneFileStatus(fileStatus);
}
return result;
}
return status;
}
/**
* FileStatus object may not be serializable. Clone it into raw FileStatus
* object.
*/
private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) {
return new FileStatus(fileStatus.getLen(),
fileStatus.isDir(),
fileStatus.getReplication(),
fileStatus.getBlockSize(),
fileStatus.getModificationTime(),
fileStatus.getAccessTime(),
fileStatus.getPermission(),
fileStatus.getOwner(),
fileStatus.getGroup(),
fileStatus.getPath());
}
private FileSystem DaemonProtocol.getFS(final Path path, final boolean local)
throws IOException {
FileSystem ret = null;
try {
ret = UserGroupInformation.getLoginUser().doAs (
new PrivilegedExceptionAction<FileSystem>() {
public FileSystem run() throws IOException {
FileSystem fs = null;
if (local) {
fs = FileSystem.getLocal(getDaemonConf());
} else {
fs = path.getFileSystem(getDaemonConf());
}
return fs;
}
});
} catch (InterruptedException ie) {
}
return ret;
}
@SuppressWarnings("unchecked")
public ControlAction[] DaemonProtocol.getActions(Writable key)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(key);
if(actionList == null) {
return new ControlAction[0];
} else {
return (ControlAction[]) actionList.toArray(new ControlAction[actionList
.size()]);
}
}
}
@SuppressWarnings("unchecked")
public void DaemonProtocol.sendAction(ControlAction action)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
actionList = new ArrayList<ControlAction>();
actions.put(action.getTarget(), actionList);
}
actionList.add(action);
}
}
@SuppressWarnings("unchecked")
public boolean DaemonProtocol.isActionPending(ControlAction action)
throws IOException{
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
return false;
} else {
return actionList.contains(action);
}
}
}
@SuppressWarnings("unchecked")
public void DaemonProtocol.removeAction(ControlAction action)
throws IOException {
synchronized (actions) {
List<ControlAction> actionList = actions.get(action.getTarget());
if(actionList == null) {
return;
} else {
actionList.remove(action);
}
}
}
public void DaemonProtocol.clearActions() throws IOException {
synchronized (actions) {
actions.clear();
}
}
public String DaemonProtocol.getFilePattern() {
//We use the environment variable HADOOP_LOGFILE to get the
//pattern to use in the search.
String logDir = System.getenv("HADOOP_LOG_DIR");
String daemonLogPattern = System.getenv("HADOOP_LOGFILE");
if (daemonLogPattern == null || daemonLogPattern.isEmpty()) {
return "*";
}
return logDir+File.separator+daemonLogPattern+"*";
}
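// Illustrative example (hypothetical values, added for clarity): for a search
// pattern of "FATAL" and an exclude list of {"Foo"}, the method below ends up
// running roughly
//   bash -c "grep -c FATAL <logdir>/<logfile>* | grep -v Foo | awk -F: '{s+=$2} END {print s}'"
// grep -c prints one "file:count" line per log file, each exclude entry is
// appended as a further "| grep -v <entry>" stage, and the awk stage sums the counts.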
public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
String[] list) throws IOException {
StringBuffer filePattern = new StringBuffer(getFilePattern());
if(list != null){
for(int i =0; i < list.length; ++i)
{
filePattern.append(" | grep -v " + list[i] );
}
}
String[] cmd =
new String[] {
"bash",
"-c",
"grep -c "
+ pattern + " " + filePattern
+ " | awk -F: '{s+=$2} END {print s}'" };
ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
shexec.execute();
String output = shexec.getOutput();
return Integer.parseInt(output.replaceAll("\n", "").trim());
}
private String DaemonProtocol.user = null;
public String DaemonProtocol.getDaemonUser() {
return user;
}
public void DaemonProtocol.setUser(String user) {
this.user = user;
}
}
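The inter-type declarations above give every woven daemon a common RPC surface (ping, isReady, log scanning, control actions) that test clients can call. Below is a minimal client-side sketch of using that surface, assuming a DaemonProtocol proxy has already been obtained (for example through Hadoop's RPC layer); the helper class, its name, and the timeout handling are illustrative only and not part of this commit.

import java.io.IOException;
import org.apache.hadoop.test.system.DaemonProtocol;

public class DaemonReadinessProbe {
  /** Polls the woven isReady() method until the daemon is up or the timeout expires. */
  public static boolean waitForReady(DaemonProtocol daemon, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      daemon.ping();           // throws IOException if the daemon is not alive
      if (daemon.isReady()) {  // flag set through DaemonProtocol.setReady(true)
        return true;
      }
      Thread.sleep(1000);
    }
    return false;
  }
}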


@@ -0,0 +1,41 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
OBJS=main.o runAs.o
CC=@CC@
CFLAGS = @CFLAGS@
BINARY=runAs
installdir = @prefix@
all: $(OBJS)
$(CC) $(CFLAGS) -o $(BINARY) $(OBJS)
main.o: runAs.o main.c
$(CC) $(CFLAGS) -o main.o -c main.c
runAs.o: runAs.h runAs.c
$(CC) $(CFLAGS) -o runAs.o -c runAs.c
clean:
rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
install: all
cp $(BINARY) $(installdir)
uninstall:
rm -rf $(installdir)/$(BINARY)
rm -rf $(BINARY)

src/test/system/c++/runAs/configure

File diff suppressed because it is too large.


@@ -0,0 +1,65 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
AC_PREREQ(2.59)
AC_INIT([runAs],[0.1])
#changing the default prefix value to an empty string, so that the binary does not
#get installed within the system
AC_PREFIX_DEFAULT(.)
#add new arguments --with-home
AC_ARG_WITH(home,[--with-home path to hadoop home dir])
AC_CONFIG_SRCDIR([main.c])
AC_CONFIG_HEADER([runAs.h])
# Checks for programs.
AC_PROG_CC
# Checks for libraries.
# Checks for header files.
AC_HEADER_STDC
AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
#check for HADOOP_HOME
if test "$with_home" != ""
then
AC_DEFINE_UNQUOTED(HADOOP_HOME,"$with_home")
fi
# Checks for typedefs, structures, and compiler characteristics.
AC_C_CONST
AC_TYPE_PID_T
AC_TYPE_MODE_T
AC_TYPE_SIZE_T
# Checks for library functions.
AC_FUNC_MALLOC
AC_FUNC_REALLOC
AC_FUNC_CHOWN
AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
AC_CONFIG_FILES([Makefile])
AC_OUTPUT
AC_HEADER_STDBOOL
AC_PROG_MAKE_SET


@@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runAs.h"
/**
* The binary accepts a command of the following format:
* cluster-controller user hostname hadoop-daemon.sh-command
*/
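/*
 * Illustrative invocation (hypothetical user and host names, for clarity only):
 * the binary switches to <user> and ssh-es to <hostname> to run
 * ${HADOOP_HOME}/bin/hadoop-daemon.sh with the remaining argument, e.g.
 *
 *   ./runAs hadoopqa node-1.example.com "--config /tmp/conf start datanode"
 */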
int main(int argc, char **argv) {
int errorcode;
char *user;
char *hostname;
char *command;
struct passwd user_detail;
int i = 1;
/*
* Minimum number of arguments required for the binary to perform.
*/
if (argc < 4) {
fprintf(stderr, "Invalid number of arguments passed to the binary\n");
return INVALID_ARGUMENT_NUMER;
}
user = argv[1];
if (user == NULL) {
fprintf(stderr, "Invalid user name\n");
return INVALID_USER_NAME;
}
if (getuserdetail(user, &user_detail) != 0) {
fprintf(stderr, "Invalid user name\n");
return INVALID_USER_NAME;
}
if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
fprintf(stderr, "Cannot run tasks as super user\n");
return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
}
hostname = argv[2];
command = argv[3];
return process_controller_command(user, hostname, command);
}


@@ -0,0 +1,111 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "runAs.h"
/*
* Function to get the user details populated given a user name.
*/
int getuserdetail(char *user, struct passwd *user_detail) {
struct passwd *tempPwdPtr;
int size = sysconf(_SC_GETPW_R_SIZE_MAX);
char pwdbuffer[size];
if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) {
fprintf(stderr, "Invalid user provided to getpwnam\n");
return -1;
}
return 0;
}
/**
* Function to switch the user identity and set the appropriate
* group memberships for the user specified in the argument.
*/
int switchuser(char *user) {
//populate the user details
struct passwd user_detail;
if ((getuserdetail(user, &user_detail)) != 0) {
return INVALID_USER_NAME;
}
//set the right supplementary groups for the user.
if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
fprintf(stderr, "Init groups call for the user : %s failed\n",
user_detail.pw_name);
return INITGROUPS_FAILED;
}
errno = 0;
//switch the group.
setgid(user_detail.pw_gid);
if (errno != 0) {
fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
errno = 0;
//switch the user
setuid(user_detail.pw_uid);
if (errno != 0) {
fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
errno = 0;
//set the effective user id.
seteuid(user_detail.pw_uid);
if (errno != 0) {
fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
return SETUID_OPER_FAILED;
}
return 0;
}
/*
* Top level method which processes a cluster management
* command.
*/
int process_cluster_command(char * user, char * node , char *command) {
char *finalcommandstr;
int len;
int errorcode = 0;
if (strncmp(command, "", strlen(command)) == 0) {
fprintf(stderr, "Invalid command passed\n");
return INVALID_COMMAND_PASSED;
}
len = STRLEN + strlen(command);
finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
snprintf(finalcommandstr, len, SCRIPT_DIR_PATTERN, HADOOP_HOME,
command);
finalcommandstr[len] = '\0';
errorcode = switchuser(user);
if (errorcode != 0) {
fprintf(stderr, "switch user failed\n");
return errorcode;
}
errno = 0;
execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
if (errno != 0) {
fprintf(stderr, "Excelp failed dude to : %s\n", strerror(errno));
}
return 0;
}
/*
* Processes a cluster controller command; this is the API exposed to
* main in order to execute cluster commands.
*/
int process_controller_command(char *user, char * node, char *command) {
return process_cluster_command(user, node, command);
}


@@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
#include <assert.h>
#include <getopt.h>
#include <grp.h>
/*
* List of possible error codes.
*/
enum errorcodes {
INVALID_ARGUMENT_NUMER = 1,
INVALID_USER_NAME, //2
SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
INITGROUPS_FAILED, //4
SETUID_OPER_FAILED, //5
INVALID_COMMAND_PASSED, //6
};
#undef HADOOP_HOME
#define SSH_COMMAND "ssh"
#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
#define STRLEN strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_HOME)
/*
* Function to get the user details populated given a user name.
*/
int getuserdetail(char *user, struct passwd *user_detail);
/*
* Processes a cluster controller command; this is the API exposed to
* main in order to execute cluster commands.
*/
int process_controller_command(char *user, char *node, char *command);


@@ -0,0 +1,52 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!--
These are Herriot-specific protocols. This section shouldn't be present in
a production cluster configuration. This file needs to be linked up to the
main conf/hadoop-policy.xml in the deployment process
-->
<property>
<name>security.daemon.protocol.acl</name>
<value>*</value>
<description>ACL for DaemonProtocol, extended by all other
Herriot RPC protocols.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.nn.protocol.acl</name>
<value>*</value>
<description>ACL for NNProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
NameNode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.dn.protocol.acl</name>
<value>*</value>
<description>ACL for DNProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
DataNode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.tt.protocol.acl</name>
<value>*</value>
<description>ACL for TTProtocol, used by the
Herriot AbstractDaemonCluster's implementations to connect to a remote
TaskTracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>


@@ -0,0 +1,343 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.ConcurrentModificationException;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.test.system.process.RemoteProcess;
/**
* Abstract class which encapsulates the DaemonClient which is used in the
* system tests.<br/>
*
* @param PROXY the proxy implementation of a specific Daemon
*/
public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
private Configuration conf;
private RemoteProcess process;
private boolean connected;
private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
/**
* Create a Daemon client.<br/>
*
* @param conf client to be used by proxy to connect to Daemon.
* @param process the Daemon process to manage the particular daemon.
*
* @throws IOException
*/
public AbstractDaemonClient(Configuration conf, RemoteProcess process)
throws IOException {
this.conf = conf;
this.process = process;
}
/**
* Gets if the client is connected to the Daemon <br/>
*
* @return true if connected.
*/
public boolean isConnected() {
return connected;
}
protected void setConnected(boolean connected) {
this.connected = connected;
}
/**
* Create an RPC proxy to the daemon <br/>
*
* @throws IOException
*/
public abstract void connect() throws IOException;
/**
* Disconnect the underlying RPC proxy to the daemon.<br/>
* @throws IOException
*/
public abstract void disconnect() throws IOException;
/**
* Get the proxy to connect to a particular service Daemon.<br/>
*
* @return proxy to connect to a particular service Daemon.
*/
protected abstract PROXY getProxy();
/**
* Gets the daemon level configuration.<br/>
*
* @return configuration using which daemon is running
*/
public Configuration getConf() {
return conf;
}
/**
* Gets the host on which Daemon is currently running. <br/>
*
* @return hostname
*/
public String getHostName() {
return process.getHostName();
}
/**
* Gets if the Daemon is ready to accept RPC connections. <br/>
*
* @return true if daemon is ready.
* @throws IOException
*/
public boolean isReady() throws IOException {
return getProxy().isReady();
}
/**
* Kills the Daemon process <br/>
* @throws IOException
*/
public void kill() throws IOException {
process.kill();
}
/**
* Checks if the Daemon process is alive or not <br/>
*
* @throws IOException
*/
public void ping() throws IOException {
getProxy().ping();
}
/**
* Start up the Daemon process. <br/>
* @throws IOException
*/
public void start() throws IOException {
process.start();
}
/**
* Get system level view of the Daemon process.
*
* @return returns system level view of the Daemon process.
*
* @throws IOException
*/
public ProcessInfo getProcessInfo() throws IOException {
return getProxy().getProcessInfo();
}
/**
* Return a file status object that represents the path.
* @param path
* given path
* @param local
* whether the path is local or not
* @return a FileStatus object
* @throws java.io.FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
public FileStatus getFileStatus(String path, boolean local) throws IOException {
return getProxy().getFileStatus(path, local);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @return the statuses of the files/directories in the given path
* @throws IOException
*/
public FileStatus[] listStatus(String path, boolean local)
throws IOException {
return getProxy().listStatus(path, local);
}
/**
* List the statuses of the files/directories in the given path if the path is
* a directory, recursively or non-recursively depending on the parameters.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @param recursive
* whether to recursively get the status
* @return the statuses of the files/directories in the given path
* @throws IOException
*/
public FileStatus[] listStatus(String path, boolean local, boolean recursive)
throws IOException {
List<FileStatus> status = new ArrayList<FileStatus>();
addStatus(status, path, local, recursive);
return status.toArray(new FileStatus[0]);
}
private void addStatus(List<FileStatus> status, String f,
boolean local, boolean recursive)
throws IOException {
FileStatus[] fs = listStatus(f, local);
if (fs != null) {
for (FileStatus fileStatus : fs) {
if (!f.equals(fileStatus.getPath().toString())) {
status.add(fileStatus);
if (recursive) {
addStatus(status, fileStatus.getPath().toString(), local, recursive);
}
}
}
}
}
/**
* Gets the number of times FATAL log messages were logged in the Daemon logs.
* <br/>
* Pattern used for searching is FATAL. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of fatal message.
* @throws IOException
*/
public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "FATAL";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets the number of times ERROR log messages were logged in the Daemon logs.
* <br/>
* Pattern used for searching is ERROR. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of error message.
* @throws IOException
*/
public int getNumberOfErrorStatementsInLog(String[] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "ERROR";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets the number of times warning log messages were logged in the Daemon logs.
* <br/>
* Pattern used for searching is WARN. <br/>
* @param excludeExpList list of exceptions to exclude
* @return number of occurrence of warning message.
* @throws IOException
*/
public int getNumberOfWarnStatementsInLog(String[] excludeExpList)
throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = "WARN";
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Gets the number of times the given exception was present in the log file. <br/>
*
* @param e exception class.
* @param excludeExpList list of exceptions to exclude.
* @return number of exceptions in log
* @throws IOException
*/
public int getNumberOfExceptionsInLog(Exception e,
String[] excludeExpList) throws IOException {
DaemonProtocol proxy = getProxy();
String pattern = e.getClass().getSimpleName();
return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
}
/**
* Number of times ConcurrentModificationException is present in the log file.
* <br/>
* @param excludeExpList list of exceptions to exclude.
* @return number of times the exception appears in the log file.
* @throws IOException
*/
public int getNumberOfConcurrentModificationExceptionsInLog(
String[] excludeExpList) throws IOException {
return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
excludeExpList);
}
private int errorCount;
private int fatalCount;
private int concurrentExceptionCount;
/**
* Populate the initial exception counts, used to assert once a testcase
* is done that no new exceptions were logged in the daemon while it ran.
* @param excludeExpList list of exceptions to exclude
* @throws IOException
*/
protected void populateExceptionCount(String [] excludeExpList)
throws IOException {
errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
LOG.info("Number of error messages in logs : " + errorCount);
fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
LOG.info("Number of fatal statement in logs : " + fatalCount);
concurrentExceptionCount =
getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
LOG.info("Number of concurrent modification in logs : "
+ concurrentExceptionCount);
}
/**
* Assert that no new exceptions were logged into the log file.
* <br/>
* <b><i>
* Pre-requisite for this method is that populateExceptionCount() has
* to be called before calling this method.</i></b>
* @param excludeExpList list of exceptions to exclude
* @throws IOException
*/
protected void assertNoExceptionsOccurred(String [] excludeExpList)
throws IOException {
int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
LOG.info("Number of error messages while asserting :" + newerrorCount);
int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
LOG.info("Number of fatal messages while asserting : " + newfatalCount);
int newconcurrentExceptionCount =
getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
LOG.info("Number of concurrentmodification exception while asserting :"
+ newconcurrentExceptionCount);
Assert.assertEquals(
"New Error Messages logged in the log file", errorCount, newerrorCount);
Assert.assertEquals(
"New Fatal messages logged in the log file", fatalCount, newfatalCount);
Assert.assertEquals(
"New ConcurrentModificationException in log file",
concurrentExceptionCount, newconcurrentExceptionCount);
}
}
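A minimal sketch of how the log-counting helpers above are intended to be used, assuming `client` is a connected concrete AbstractDaemonClient and the action under test is supplied by the caller; the class and method names below are illustrative, not part of this patch:

import java.io.IOException;
import org.apache.hadoop.test.system.AbstractDaemonClient;
import org.junit.Assert;

public class FatalLogCheckExample {
  /** Run an action and assert that it logged no new FATAL messages. */
  public static void runWithFatalCheck(AbstractDaemonClient<?> client,
      Runnable actionUnderTest) throws IOException {
    String[] exclude = new String[0];        // no exception patterns excluded
    int before = client.getNumberOfFatalStatementsInLog(exclude);
    actionUnderTest.run();                   // the operation being verified
    int after = client.getNumberOfFatalStatementsInLog(exclude);
    Assert.assertEquals("New FATAL messages logged", before, after);
  }
}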

View File

@@ -0,0 +1,293 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.system.process.ClusterProcessManager;
import org.apache.hadoop.test.system.process.RemoteProcess;
/**
* Abstract class which represents a cluster of multiple daemons.
*/
@SuppressWarnings("unchecked")
public abstract class AbstractDaemonCluster {
private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
private String[] excludeExpList;
private Configuration conf;
protected ClusterProcessManager clusterManager;
private Map<Enum<?>, List<AbstractDaemonClient>> daemons =
new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
/**
* Constructor to create a cluster client.<br/>
*
* @param conf
* Configuration to be used while constructing the cluster.
* @param rcluster
* process manager instance to be used for managing the daemons.
*
* @throws IOException
*/
public AbstractDaemonCluster(Configuration conf,
ClusterProcessManager rcluster) throws IOException {
this.conf = conf;
this.clusterManager = rcluster;
createAllClients();
}
/**
* The method returns the cluster manager. The system test cases require an
* instance of HadoopDaemonRemoteCluster to invoke certain operations on the
* daemons.
*
* @return instance of clusterManager
*/
public ClusterProcessManager getClusterManager() {
return clusterManager;
}
protected void createAllClients() throws IOException {
for (RemoteProcess p : clusterManager.getAllProcesses()) {
List<AbstractDaemonClient> dms = daemons.get(p.getRole());
if (dms == null) {
dms = new ArrayList<AbstractDaemonClient>();
daemons.put(p.getRole(), dms);
}
dms.add(createClient(p));
}
}
/**
* Method to create the daemon client.<br/>
*
* @param process
* to manage the daemon.
* @return instance of the daemon client
*
* @throws IOException
*/
protected abstract AbstractDaemonClient<DaemonProtocol>
createClient(RemoteProcess process) throws IOException;
/**
* Get the global cluster configuration which was used to create the
* cluster. <br/>
*
* @return global configuration of the cluster.
*/
public Configuration getConf() {
return conf;
}
/**
* Return the client handle of all the Daemons.<br/>
*
* @return map of role to daemon clients' list.
*/
public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
return daemons;
}
/**
* Checks if the cluster is ready for testing. <br/>
* Algorithm for checking is as follows : <br/>
* <ul>
* <li> Wait for Daemon to come up </li>
* <li> Check if daemon is ready </li>
* <li> If one of the daemons is not ready, return false </li>
* </ul>
*
* @return true if whole cluster is ready.
*
* @throws IOException
*/
public boolean isReady() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
waitForDaemon(daemon);
if (!daemon.isReady()) {
return false;
}
}
}
return true;
}
protected void waitForDaemon(AbstractDaemonClient d) {
final int TEN_SEC = 10000;
while(true) {
try {
LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
LOG.info("Daemon might not be " +
"ready or the call to setReady() method hasn't been " +
"injected to " + d.getClass() + " ");
d.connect();
break;
} catch (IOException e) {
try {
Thread.sleep(TEN_SEC);
} catch (InterruptedException ie) {
}
}
}
}
/**
* Starts the cluster daemons.
* @throws IOException
*/
public void start() throws IOException {
clusterManager.start();
}
/**
* Stops the cluster daemons.
* @throws IOException
*/
public void stop() throws IOException {
clusterManager.stop();
}
/**
* Connect to daemon RPC ports.
* @throws IOException
*/
public void connect() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
daemon.connect();
}
}
}
/**
* Disconnect from daemon RPC ports.
* @throws IOException
*/
public void disconnect() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
daemon.disconnect();
}
}
}
/**
* Ping all the daemons of the cluster.
* @throws IOException
*/
public void ping() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
daemon.ping();
}
}
}
/**
* Connect to the cluster and ensure that it is clean to run tests.
* @throws Exception
*/
public void setUp() throws Exception {
while (!isReady()) {
Thread.sleep(1000);
}
connect();
ping();
clearAllControlActions();
ensureClean();
populateExceptionCounts();
}
/**
* This is mainly used for the test cases to set the list of exceptions
* that will be excluded.
* @param excludeExpList list of exceptions to exclude
*/
public void setExcludeExpList(String[] excludeExpList) {
this.excludeExpList = excludeExpList;
}
public void clearAllControlActions() throws IOException {
for (List<AbstractDaemonClient> set : daemons.values()) {
for (AbstractDaemonClient daemon : set) {
LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
daemon.getProxy().clearActions();
}
}
}
/**
* Ensure that the cluster is clean to run tests.
* @throws IOException
*/
public void ensureClean() throws IOException {
}
/**
* Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
* @throws IOException
*/
public void tearDown() throws IOException {
ensureClean();
clearAllControlActions();
assertNoExceptionMessages();
disconnect();
}
/**
* Populate the exception counts in all the daemons so that they can be checked
* when the testcase has finished running.<br/>
* @throws IOException
*/
protected void populateExceptionCounts() throws IOException {
for(List<AbstractDaemonClient> lst : daemons.values()) {
for(AbstractDaemonClient d : lst) {
d.populateExceptionCount(excludeExpList);
}
}
}
/**
* Assert that no new exceptions have been logged during the sequence of actions.
* <br/>
* @throws IOException
*/
protected void assertNoExceptionMessages() throws IOException {
for(List<AbstractDaemonClient> lst : daemons.values()) {
for(AbstractDaemonClient d : lst) {
d.assertNoExceptionsOccurred(excludeExpList);
}
}
}
}
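A sketch of the intended test life cycle built on top of this class, assuming `cluster` is an instance of some concrete AbstractDaemonCluster subclass; the wrapper class name and the excluded exception are illustrative:

import org.apache.hadoop.test.system.AbstractDaemonCluster;

public class ClusterLifecycleExample {
  public static void runTest(AbstractDaemonCluster cluster) throws Exception {
    // Exceptions listed here are ignored by the log assertions.
    cluster.setExcludeExpList(new String[] {"java.net.ConnectException"});
    cluster.setUp();       // wait for readiness, connect, ping, record counts
    try {
      // ... drive the daemons through their client handles here ...
    } finally {
      cluster.tearDown();  // assert no new exceptions were logged, disconnect
    }
  }
}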

View File

@@ -0,0 +1,86 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
/**
* Class to represent a control action which can be performed on a Daemon.<br/>
*
*/
public abstract class ControlAction<T extends Writable> implements Writable {
private T target;
/**
* Default constructor of the Control Action. <br/>
*/
public ControlAction() {
}
/**
* Constructor which sets the target of the Control action. <br/>
*
* @param target
* of the control action.
*/
public ControlAction(T target) {
this.target = target;
}
/**
* Gets the target of the control action. <br/>
*
* @return target of action
*/
public T getTarget() {
return target;
}
@Override
public void readFields(DataInput in) throws IOException {
target.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
target.write(out);
}
@Override
public boolean equals(Object obj) {
if (obj instanceof ControlAction) {
ControlAction<T> other = (ControlAction<T>) obj;
return (this.target.equals(other.getTarget()));
} else {
return false;
}
}
@Override
public String toString() {
return "Action Target : " + this.target;
}
}
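A hypothetical concrete action, sketched to show how ControlAction is meant to be subclassed; SlowDownAction and its Text target are illustrative and not part of this patch. Note that the target must be non-null before readFields() is invoked on the receiving side:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.test.system.ControlAction;

public class SlowDownAction extends ControlAction<Text> {
  public SlowDownAction() {
    super(new Text());       // empty target so readFields() can populate it
  }
  public SlowDownAction(String path) {
    super(new Text(path));   // path the daemon should act upon
  }
}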

View File

@@ -0,0 +1,165 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.VersionedProtocol;
/**
* RPC interface of a given Daemon.
*/
public interface DaemonProtocol extends VersionedProtocol {
long versionID = 1L;
/**
* Returns the Daemon configuration.
* @return Configuration
* @throws IOException in case of errors
*/
Configuration getDaemonConf() throws IOException;
/**
* Check if the Daemon is alive.
*
* @throws IOException
* if Daemon is unreachable.
*/
void ping() throws IOException;
/**
* Check if the Daemon is ready to accept RPC connections.
*
* @return true if Daemon is ready to accept RPC connection.
* @throws IOException in case of errors
*/
boolean isReady() throws IOException;
/**
* Get system level view of the Daemon process.
*
* @return returns system level view of the Daemon process.
*
* @throws IOException in case of errors
*/
ProcessInfo getProcessInfo() throws IOException;
/**
* Return a file status object that represents the path.
* @param path
* given path
* @param local
* whether the path is local or not
* @return a FileStatus object
* @throws FileNotFoundException when the path does not exist;
* IOException see specific implementation
*/
FileStatus getFileStatus(String path, boolean local) throws IOException;
/**
* List the statuses of the files/directories in the given path if the path is
* a directory.
*
* @param path
* given path
* @param local
* whether the path is local or not
* @return the statuses of the files/directories in the given path
* @throws IOException in case of errors
*/
FileStatus[] listStatus(String path, boolean local) throws IOException;
/**
* Enables a particular control action to be performed on the Daemon <br/>
*
* @param action is a control action to be enabled.
*
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
void sendAction(ControlAction action) throws IOException;
/**
* Checks if the particular control action has been delivered to the Daemon
* component <br/>
*
* @param action to be checked.
*
* @return true if action is still in waiting queue of
* actions to be delivered.
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
boolean isActionPending(ControlAction action) throws IOException;
/**
* Removes a particular control action from the list of the actions which the
* daemon maintains. <br/>
* <i><b>Not to be directly called by Test Case or clients.</b></i>
* @param action to be removed
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
void removeAction(ControlAction action) throws IOException;
/**
* Clears out the list of control actions on the particular daemon.
* <br/>
* @throws IOException in case of errors
*/
void clearActions() throws IOException;
/**
* Gets a list of pending actions which are targeted on the specified key.
* <br/>
* <i><b>Not to be directly used by clients</b></i>
* @param key target
* @return list of actions.
* @throws IOException in case of errors
*/
@SuppressWarnings("unchecked")
ControlAction[] getActions(Writable key) throws IOException;
/**
* Gets the number of times a particular pattern has been found in the
* daemon's log file.<br/>
* <b><i>Please note that search spans across all previous messages of
* Daemon, so better practice is to get previous counts before an operation
* and then re-check if the sequence of action has caused any problems</i></b>
* @param pattern to look for in the daemon's log file
* @param list of exceptions to ignore
* @return number of times the pattern is found in the log file.
* @throws IOException in case of errors
*/
int getNumberOfMatchesInLogFile(String pattern, String[] list)
throws IOException;
/**
* Gets the user who started the particular daemon initially. <br/>
*
* @return user who started the particular daemon.
* @throws IOException in case of errors
*/
String getDaemonUser() throws IOException;
}
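A sketch of the send-and-wait pattern the action methods above enable, assuming `proxy` is a connected DaemonProtocol handle and `action` a concrete ControlAction; the helper class name and the polling interval are illustrative:

import java.io.IOException;
import org.apache.hadoop.test.system.ControlAction;
import org.apache.hadoop.test.system.DaemonProtocol;

public class ActionDeliveryExample {
  /** Enable an action and wait until the daemon component has consumed it. */
  public static void deliver(DaemonProtocol proxy, ControlAction<?> action)
      throws IOException, InterruptedException {
    proxy.sendAction(action);
    while (proxy.isActionPending(action)) {   // still queued on the daemon
      Thread.sleep(1000);
    }
  }
}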

View File

@@ -0,0 +1,77 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.util.Map;
import org.apache.hadoop.io.Writable;
/**
* Daemon system level process information.
*/
public interface ProcessInfo extends Writable {
/**
* Get the current time in milliseconds.<br/>
*
* @return current time on daemon clock in milliseconds.
*/
public long currentTimeMillis();
/**
* Get the environment that was used to start the Daemon process.<br/>
*
* @return the environment variable list.
*/
public Map<String,String> getEnv();
/**
* Get the System properties of the Daemon process.<br/>
*
* @return the properties list.
*/
public Map<String,String> getSystemProperties();
/**
* Get the number of active threads in Daemon VM.<br/>
*
* @return number of active threads in Daemon VM.
*/
public int activeThreadCount();
/**
* Get the maximum heap size that is configured for the Daemon VM. <br/>
*
* @return maximum heap size.
*/
public long maxMemory();
/**
* Get the free memory in Daemon VM.<br/>
*
* @return free memory.
*/
public long freeMemory();
/**
* Get the total used memory in Daemon VM. <br/>
*
* @return total used memory.
*/
public long totalMemory();
}
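A small sketch of how a test might consume this view through DaemonProtocol.getProcessInfo(), assuming `proxy` is a connected handle; the helper name is illustrative:

import java.io.IOException;
import org.apache.hadoop.test.system.DaemonProtocol;
import org.apache.hadoop.test.system.ProcessInfo;

public class ProcessInfoExample {
  /** Fraction of the daemon's maximum heap that is currently in use. */
  public static double usedHeapRatio(DaemonProtocol proxy) throws IOException {
    ProcessInfo info = proxy.getProcessInfo();
    long used = info.totalMemory() - info.freeMemory();  // committed minus free
    return (double) used / info.maxMemory();
  }
}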

View File

@@ -0,0 +1,159 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class ProcessInfoImpl implements ProcessInfo {
private int threadCount;
private long currentTime;
private long freemem;
private long maxmem;
private long totmem;
private Map<String, String> env;
private Map<String, String> props;
public ProcessInfoImpl() {
env = new HashMap<String, String>();
props = new HashMap<String, String>();
}
/**
* Construct a concrete process information object. <br/>
*
* @param threadCount
* count of threads.
* @param currentTime
* @param freemem
* @param maxmem
* @param totmem
* @param env environment list.
* @param props
*/
public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
long maxmem, long totmem, Map<String, String> env,
Map<String, String> props) {
this.threadCount = threadCount;
this.currentTime = currentTime;
this.freemem = freemem;
this.maxmem = maxmem;
this.totmem = totmem;
this.env = env;
this.props = props;
}
@Override
public int activeThreadCount() {
return threadCount;
}
@Override
public long currentTimeMillis() {
return currentTime;
}
@Override
public long freeMemory() {
return freemem;
}
@Override
public Map<String, String> getEnv() {
return env;
}
@Override
public Map<String,String> getSystemProperties() {
return props;
}
@Override
public long maxMemory() {
return maxmem;
}
@Override
public long totalMemory() {
return totmem;
}
@Override
public void readFields(DataInput in) throws IOException {
this.threadCount = in.readInt();
this.currentTime = in.readLong();
this.freemem = in.readLong();
this.maxmem = in.readLong();
this.totmem = in.readLong();
read(in, env);
read(in, props);
}
@Override
public void write(DataOutput out) throws IOException {
out.writeInt(threadCount);
out.writeLong(currentTime);
out.writeLong(freemem);
out.writeLong(maxmem);
out.writeLong(totmem);
write(out, env);
write(out, props);
}
private void read(DataInput in, Map<String, String> map) throws IOException {
int size = in.readInt();
for (int i = 0; i < size; i = i + 2) {
String key = in.readUTF();
String value = in.readUTF();
map.put(key, value);
}
}
private void write(DataOutput out, Map<String, String> map)
throws IOException {
int size = (map.size() * 2);
out.writeInt(size);
for (Map.Entry<String, String> entry : map.entrySet()) {
out.writeUTF(entry.getKey());
out.writeUTF(entry.getValue());
}
}
@Override
public String toString() {
StringBuffer strBuf = new StringBuffer();
strBuf.append(String.format("active threads : %d\n", threadCount));
strBuf.append(String.format("current time : %d\n", currentTime));
strBuf.append(String.format("free memory : %d\n", freemem));
strBuf.append(String.format("total memory : %d\n", totmem));
strBuf.append(String.format("max memory : %d\n", maxmem));
strBuf.append("Environment Variables : \n");
for (Map.Entry<String, String> entry : env.entrySet()) {
strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
entry.getValue()));
}
return strBuf.toString();
}
}
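A sketch of the Writable round trip this implementation supports, using Hadoop's DataOutputBuffer and DataInputBuffer purely for illustration; the helper class name is not part of this patch:

import java.io.IOException;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.test.system.ProcessInfoImpl;

public class ProcessInfoRoundTrip {
  public static ProcessInfoImpl copy(ProcessInfoImpl original) throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);                           // serialize all fields
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    ProcessInfoImpl clone = new ProcessInfoImpl(); // empty instance to fill
    clone.readFields(in);                          // deserialize
    return clone;
  }
}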

View File

@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
/**
* Interface to manage the remote processes in the cluster.
*/
public interface ClusterProcessManager {
/**
* Initialization method to pass the configuration object which is required
* by the ClusterProcessManager to manage the cluster.<br/>
* Configuration object should typically contain all the parameters which are
* required by the implementations.<br/>
*
* @param conf configuration containing values of the specific keys which
* are required by the implementation of the cluster process manager.
*
* @throws IOException when initialization fails.
*/
void init(Configuration conf) throws IOException;
/**
* Get the list of RemoteProcess handles of all the remote processes.
*/
List<RemoteProcess> getAllProcesses();
/**
* Get all the roles this cluster's daemon processes have.
*/
Set<Enum<?>> getRoles();
/**
* Method to start all the remote daemons.<br/>
*
* @throws IOException if startup procedure fails.
*/
void start() throws IOException;
/**
* Starts the daemons from the user-specified conf dir.
* @param newConfLocation the dir where the new conf files reside.
* @throws IOException
*/
void start(String newConfLocation) throws IOException;
/**
* Stops the daemons running from the user-specified conf dir.
*
* @param newConfLocation
* the dir where the new conf files reside.
* @throws IOException
*/
void stop(String newConfLocation) throws IOException;
/**
* Method to shutdown all the remote daemons.<br/>
*
* @throws IOException if shutdown procedure fails.
*/
void stop() throws IOException;
/**
* Returns whether multi-user support is enabled for this cluster.
* <br/>
* @return true if multi-user support is enabled.
* @throws IOException
*/
boolean isMultiUserSupported() throws IOException;
/**
* The pushConfig method is used to push a new config to the daemons.
* @param localDir directory containing the new config files
* @return the remote directory location where the config will be pushed
* @throws IOException
*/
String pushConfig(String localDir) throws IOException;
}
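A sketch of the expected call order for a config push, assuming `manager` is some concrete ClusterProcessManager implementation and `localConfDir` holds the prepared configuration files; the helper class name is illustrative:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.system.process.ClusterProcessManager;

public class ProcessManagerExample {
  public static void restartWithNewConfig(ClusterProcessManager manager,
      Configuration conf, String localConfDir) throws IOException {
    manager.init(conf);
    manager.stop();                                   // stop with current conf
    String remoteConfDir = manager.pushConfig(localConfDir);
    manager.start(remoteConfDir);                     // start from pushed conf
  }
}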

View File

@@ -0,0 +1,404 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
/**
* The class which implements the start-up and shut-down routines
* based on hadoop-daemon.sh. <br/>
*
* Class requires two keys to be present in the Configuration objects passed to
* it. Look at <code>CONF_HADOOPHOME</code> and
* <code>CONF_HADOOPCONFDIR</code> for the names of the
* configuration keys.
*
* The final command executed will have the following format :
* <br/>
* <code>
* ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName
* --config HADOOP_CONF_DIR (start|stop) command'
* </code>
*/
public abstract class HadoopDaemonRemoteCluster
implements ClusterProcessManager {
private static final Log LOG = LogFactory
.getLog(HadoopDaemonRemoteCluster.class.getName());
public static final String CONF_HADOOPNEWCONFDIR =
"test.system.hdrc.hadoopnewconfdir";
/**
* Key used to configure the HADOOP_HOME to be used by the
* HadoopDaemonRemoteCluster.
*/
public final static String CONF_HADOOPHOME =
"test.system.hdrc.hadoophome";
public final static String CONF_SCRIPTDIR =
"test.system.hdrc.deployed.scripts.dir";
/**
* Key used to configure the HADOOP_CONF_DIR to be used by the
* HadoopDaemonRemoteCluster.
*/
public final static String CONF_HADOOPCONFDIR =
"test.system.hdrc.hadoopconfdir";
public final static String CONF_DEPLOYED_HADOOPCONFDIR =
"test.system.hdrc.deployed.hadoopconfdir";
private String hadoopHome;
protected String hadoopConfDir;
protected String scriptsDir;
protected String hadoopNewConfDir;
private final Set<Enum<?>> roles;
private final List<HadoopDaemonInfo> daemonInfos;
private List<RemoteProcess> processes;
protected Configuration conf;
public static class HadoopDaemonInfo {
public final String cmd;
public final Enum<?> role;
public final List<String> hostNames;
public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
super();
this.cmd = cmd;
this.role = role;
this.hostNames = hostNames;
}
public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile)
throws IOException {
super();
this.cmd = cmd;
this.role = role;
File file = new File(getDeployedHadoopConfDir(), hostFile);
BufferedReader reader = null;
hostNames = new ArrayList<String>();
try {
reader = new BufferedReader(new FileReader(file));
String host = null;
while ((host = reader.readLine()) != null) {
if (host.trim().isEmpty() || host.startsWith("#")) {
// Skip empty and possible comment lines
// throw new IllegalArgumentException(
// "Hostname could not be found in file " + hostFile);
continue;
}
hostNames.add(host.trim());
}
if (hostNames.size() < 1) {
throw new IllegalArgumentException("At least one hostname "
+
"is required to be present in file - " + hostFile);
}
} finally {
if (reader != null) {
try {
reader.close();
} catch (IOException e) {
LOG.warn("Could not close reader");
}
}
}
LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from "
+ hostFile);
}
}
@Override
public String pushConfig(String localDir) throws IOException {
for (RemoteProcess process : processes){
process.pushConfig(localDir);
}
return hadoopNewConfDir;
}
public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
this.daemonInfos = daemonInfos;
this.roles = new HashSet<Enum<?>>();
for (HadoopDaemonInfo info : daemonInfos) {
this.roles.add(info.role);
}
}
@Override
public void init(Configuration conf) throws IOException {
this.conf = conf;
populateDirectories(conf);
this.processes = new ArrayList<RemoteProcess>();
populateDaemons();
}
@Override
public List<RemoteProcess> getAllProcesses() {
return processes;
}
@Override
public Set<Enum<?>> getRoles() {
return roles;
}
/**
* Method to populate the hadoop home and hadoop configuration directories.
*
* @param conf
* Configuration object containing values for
* CONF_HADOOPHOME and
* CONF_HADOOPCONFDIR
*
* @throws IllegalArgumentException
* if the configuration or system property set does not contain
* values for the required keys.
*/
protected void populateDirectories(Configuration conf) {
hadoopHome = conf.get(CONF_HADOOPHOME);
hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
scriptsDir = conf.get(CONF_SCRIPTDIR);
hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
|| hadoopConfDir.isEmpty()) {
LOG.error("No configuration "
+ "for the HADOOP_HOME and HADOOP_CONF_DIR passed");
throw new IllegalArgumentException(
"No Configuration passed for hadoop home " +
"and hadoop conf directories");
}
}
public static String getDeployedHadoopConfDir() {
String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
if (dir == null || dir.isEmpty()) {
LOG.error("No configuration "
+ "for the CONF_DEPLOYED_HADOOPCONFDIR passed");
throw new IllegalArgumentException(
"No Configuration passed for hadoop deployed conf directory");
}
return dir;
}
@Override
public void start() throws IOException {
for (RemoteProcess process : processes) {
process.start();
}
}
@Override
public void start(String newConfLocation)throws IOException {
for (RemoteProcess process : processes) {
process.start(newConfLocation);
}
}
@Override
public void stop() throws IOException {
for (RemoteProcess process : processes) {
process.kill();
}
}
@Override
public void stop(String newConfLocation) throws IOException {
for (RemoteProcess process : processes) {
process.kill(newConfLocation);
}
}
protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
for (String host : info.hostNames) {
InetAddress addr = InetAddress.getByName(host);
RemoteProcess process = getProcessManager(info,
addr.getCanonicalHostName());
processes.add(process);
}
}
protected void populateDaemons() throws IOException {
for (HadoopDaemonInfo info : daemonInfos) {
populateDaemon(info);
}
}
@Override
public boolean isMultiUserSupported() throws IOException {
return false;
}
protected RemoteProcess getProcessManager(
HadoopDaemonInfo info, String hostName) {
RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
return process;
}
/**
* The core daemon class which actually implements the remote process
* management of actual daemon processes in the cluster.
*
*/
class ScriptDaemon implements RemoteProcess {
private static final String STOP_COMMAND = "stop";
private static final String START_COMMAND = "start";
private static final String SCRIPT_NAME = "hadoop-daemon.sh";
private static final String PUSH_CONFIG ="pushConfig.sh";
protected final String daemonName;
protected final String hostName;
private final Enum<?> role;
public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
this.daemonName = daemonName;
this.hostName = hostName;
this.role = role;
}
@Override
public String getHostName() {
return hostName;
}
private String[] getPushConfigCommand(String localDir, String remoteDir,
File scriptDir) throws IOException{
ArrayList<String> cmdArgs = new ArrayList<String>();
cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
cmdArgs.add(localDir);
cmdArgs.add(hostName);
cmdArgs.add(remoteDir);
cmdArgs.add(hadoopConfDir);
return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
}
private ShellCommandExecutor buildPushConfig(String local, String remote )
throws IOException {
File scriptDir = new File(scriptsDir);
String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
HashMap<String, String> env = new HashMap<String, String>();
ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
scriptDir, env);
LOG.info(executor.toString());
return executor;
}
private ShellCommandExecutor createNewConfDir() throws IOException {
ArrayList<String> cmdArgs = new ArrayList<String>();
cmdArgs.add("ssh");
cmdArgs.add(hostName);
cmdArgs.add("if [ -d "+ hadoopNewConfDir+
" ];\n then echo Will remove existing directory; rm -rf "+
hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+
"echo " + hadoopNewConfDir + " doesnt exist hence creating" +
"; mkdir " + hadoopNewConfDir + ";\n fi");
String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
LOG.info(executor.toString());
return executor;
}
@Override
public void pushConfig(String localDir) throws IOException {
createNewConfDir().execute();
buildPushConfig(localDir, hadoopNewConfDir).execute();
}
private ShellCommandExecutor buildCommandExecutor(String command,
String confDir) {
String[] commandArgs = getCommand(command, confDir);
File cwd = new File(".");
HashMap<String, String> env = new HashMap<String, String>();
env.put("HADOOP_CONF_DIR", confDir);
ShellCommandExecutor executor
= new ShellCommandExecutor(commandArgs, cwd, env);
LOG.info(executor.toString());
return executor;
}
private File getBinDir() {
File binDir = new File(hadoopHome, "bin");
return binDir;
}
protected String[] getCommand(String command, String confDir) {
ArrayList<String> cmdArgs = new ArrayList<String>();
File binDir = getBinDir();
cmdArgs.add("ssh");
cmdArgs.add(hostName);
cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
cmdArgs.add("--config");
cmdArgs.add(confDir);
// XXX Twenty internal version does not support --script option.
cmdArgs.add(command);
cmdArgs.add(daemonName);
return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
}
@Override
public void kill() throws IOException {
kill(hadoopConfDir);
}
@Override
public void start() throws IOException {
start(hadoopConfDir);
}
public void start(String newConfLocation) throws IOException {
ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
newConfLocation);
cme.execute();
String output = cme.getOutput();
if (!output.isEmpty()) { //getOutput() never returns null value
if (output.toLowerCase().contains("error")) {
LOG.warn("Error is detected.");
throw new IOException("Start error\n" + output);
}
}
}
public void kill(String newConfLocation) throws IOException {
ShellCommandExecutor cme
= buildCommandExecutor(STOP_COMMAND, newConfLocation);
cme.execute();
String output = cme.getOutput();
if (!output.isEmpty()) { //getOutput() never returns null value
if (output.toLowerCase().contains("error")) {
LOG.info("Error is detected.");
throw new IOException("Kill error\n" + output);
}
}
}
@Override
public Enum<?> getRole() {
return role;
}
}
}
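A hypothetical concrete cluster, sketched to show how daemon commands and roles map to the host files read by HadoopDaemonInfo; the enum, command names and host file names are illustrative, and the files are expected under the directory named by test.system.hdrc.deployed.hadoopconfdir:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;

public class ExampleRemoteCluster extends HadoopDaemonRemoteCluster {
  public enum Role { MASTER, WORKER }

  public ExampleRemoteCluster() throws IOException {
    super(Arrays.asList(
        new HadoopDaemonInfo("namenode", Role.MASTER, "masters"),
        new HadoopDaemonInfo("datanode", Role.WORKER, "slaves")));
  }
}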

View File

@@ -0,0 +1,96 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version
* 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
public abstract class MultiUserHadoopDaemonRemoteCluster
extends HadoopDaemonRemoteCluster {
public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
super(daemonInfos);
}
@Override
protected RemoteProcess getProcessManager(
HadoopDaemonInfo info, String hostName) {
return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
}
@Override
public boolean isMultiUserSupported() throws IOException {
return true;
}
class MultiUserScriptDaemon extends ScriptDaemon {
private static final String MULTI_USER_BINARY_PATH_KEY =
"test.system.hdrc.multi-user.binary.path";
private static final String MULTI_USER_MANAGING_USER =
"test.system.hdrc.multi-user.managinguser.";
private String binaryPath;
/**
* Managing user for a particular daemon is obtained from
* MULTI_USER_MANAGING_USER + daemonName
*/
private String mangingUser;
public MultiUserScriptDaemon(
String daemonName, String hostName, Enum<?> role) {
super(daemonName, hostName, role);
initialize(daemonName);
}
private void initialize(String daemonName) {
binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
if (binaryPath == null || binaryPath.trim().isEmpty()) {
throw new IllegalArgumentException(
"Binary path for multi-user path is not present. Please set "
+ MULTI_USER_BINARY_PATH_KEY + " correctly");
}
File binaryFile = new File(binaryPath);
if (!binaryFile.exists() || !binaryFile.canExecute()) {
throw new IllegalArgumentException(
"Binary file path is not configured correctly. Please set "
+ MULTI_USER_BINARY_PATH_KEY
+ " to properly configured binary file.");
}
mangingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
if (mangingUser == null || mangingUser.trim().isEmpty()) {
throw new IllegalArgumentException(
"Manging user for daemon not present please set : "
+ MULTI_USER_MANAGING_USER + daemonName + " to correct value.");
}
}
@Override
protected String[] getCommand(String command,String confDir) {
ArrayList<String> commandList = new ArrayList<String>();
commandList.add(binaryPath);
commandList.add(mangingUser);
commandList.add(hostName);
commandList.add("--config "
+ confDir + " " + command + " " + daemonName);
return (String[]) commandList.toArray(new String[commandList.size()]);
}
}
}
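A sketch of the extra configuration the multi-user variant reads; the key names come from MultiUserScriptDaemon above (managing-user keys are suffixed with the daemon command name), while the binary path and user names are illustrative:

import org.apache.hadoop.conf.Configuration;

public class MultiUserConfigExample {
  public static Configuration multiUserConf() {
    Configuration conf = new Configuration();
    // setuid wrapper binary used to run commands as the managing user
    conf.set("test.system.hdrc.multi-user.binary.path",
        "/usr/local/bin/runAs");
    // managing user per daemon command
    conf.set("test.system.hdrc.multi-user.managinguser.namenode", "hdfs");
    conf.set("test.system.hdrc.multi-user.managinguser.datanode", "hdfs");
    return conf;
  }
}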

View File

@@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test.system.process;
import java.io.IOException;
/**
* Interface to manage the remote process.
*/
public interface RemoteProcess {
/**
* Get the host on which the daemon process is running/stopped.<br/>
*
* @return hostname on which process is running/stopped.
*/
String getHostName();
/**
* Start a given daemon process.<br/>
*
* @throws IOException if startup fails.
*/
void start() throws IOException;
/**
* Starts a daemon from user specified conf dir.
* @param newConfLocation is dir where new conf resides.
* @throws IOException
*/
void start(String newConfLocation) throws IOException;
/**
* Stop a given daemon process.<br/>
*
* @throws IOException if shutdown fails.
*/
void kill() throws IOException;
/**
* Stops a given daemon running from user specified
* conf dir. <br/>
* @throws IOException
* @param newConfLocation dir location where new conf resides.
*/
void kill(String newConfLocation) throws IOException;
/**
* Get the role of the Daemon in the cluster.
*
* @return Enum
*/
Enum<?> getRole();
/**
* Pushes the configuration to a new configuration directory.
* @param localDir the local directory containing the new configuration files
* @throws IOException
*/
void pushConfig(String localDir) throws IOException;
}

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# local folder with new configuration file
LOCAL_DIR=$1
# remote daemon host
HOST=$2
#remote dir points to the location of new config files
REMOTE_DIR=$3
# remote daemon HADOOP_CONF_DIR location
DAEMON_HADOOP_CONF_DIR=$4
if [ $# -ne 4 ]; then
echo "Wrong number of parameters" >&2
exit 2
fi
ret_value=0
echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
echo and populates it with new configs prepared in $LOCAL_DIR
ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
ret_value=$?
# make sure files are writable
ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
# copy new files over
scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
err_code=`echo $? + $ret_value | bc`
echo Copying of files from local to remote returned ${err_code}