Merge r1460409 through r1462697 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1462698 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-03-30 03:50:03 +00:00
commit f5bbc2d950
193 changed files with 5278 additions and 4229 deletions

View File

@ -139,6 +139,15 @@ Create a local staging version of the website (in /tmp/hadoop-site)
---------------------------------------------------------------------------------- ----------------------------------------------------------------------------------
Building on OS/X
----------------------------------------------------------------------------------
Hadoop does not build on OS/X with Java 7.
see: https://issues.apache.org/jira/browse/HADOOP-9350
----------------------------------------------------------------------------------
Building on Windows Building on Windows
---------------------------------------------------------------------------------- ----------------------------------------------------------------------------------

View File

@ -323,7 +323,7 @@ checkAuthor () {
} }
############################################################################### ###############################################################################
### Check for tests and their timeout in the patch ### Check for tests in the patch
checkTests () { checkTests () {
echo "" echo ""
echo "" echo ""
@ -357,25 +357,7 @@ checkTests () {
JIRA_COMMENT="$JIRA_COMMENT JIRA_COMMENT="$JIRA_COMMENT
{color:green}+1 tests included{color}. The patch appears to include $testReferences new or modified test files." {color:green}+1 tests included{color}. The patch appears to include $testReferences new or modified test files."
echo ""
echo "======================================================================"
echo "======================================================================"
echo " Checking if the tests have timeout assigned in this patch."
echo "======================================================================"
echo "======================================================================"
nontimeoutTests=`cat $PATCH_DIR/patch | $AWK '{ printf "%s ", $0 }' | $GREP --extended-regex --count '[ ]*\+[ ]*((@Test[\+ ]*[A-Za-z]+)|([\+ ]*@Test[ \+]*\([ \+]*\)[\ ]*\+?[ ]*[A-Za-z]+)|([\+ ]*@Test[\+ ]*\(exception[ \+]*=[ \+]*[A-Z.a-z0-9A-Z ]*\)))'`
if [[ $nontimeoutTests == 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
{color:green}+1 tests included appear to have a timeout.{color}"
return 0 return 0
fi
JIRA_COMMENT="$JIRA_COMMENT
{color:red}-1 one of tests included doesn't have a timeout.{color}"
return 1
} }
cleanUpXml () { cleanUpXml () {

View File

@ -17,6 +17,8 @@ Trunk (Unreleased)
HADOOP-9380 Add totalLength to rpc response (sanjay Radia) HADOOP-9380 Add totalLength to rpc response (sanjay Radia)
HADOOP-9194. RPC Support for QoS. (Junping Du via llu)
NEW FEATURES NEW FEATURES
HADOOP-8561. Introduce HADOOP_PROXY_USER for secure impersonation in child HADOOP-8561. Introduce HADOOP_PROXY_USER for secure impersonation in child
@ -90,9 +92,6 @@ Trunk (Unreleased)
HADOOP-8367 Improve documentation of declaringClassProtocolName in HADOOP-8367 Improve documentation of declaringClassProtocolName in
rpc headers. (Sanjay Radia) rpc headers. (Sanjay Radia)
HADOOP-8415. Add getDouble() and setDouble() in
org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS HADOOP-7659. fs -getmerge isn't guaranteed to work well over non-HDFS
filesystems (harsh) filesystems (harsh)
@ -162,9 +161,7 @@ Trunk (Unreleased)
HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia) HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)
HADOOP-9112. test-patch should -1 for @Tests without a timeout HADOOP-9258 Add stricter tests to FileSystemContractTestBase (stevel)
(Surenkumar Nihalani via bobby)
BUG FIXES BUG FIXES
@ -358,6 +355,15 @@ Trunk (Unreleased)
HADOOP-9431 TestSecurityUtil#testLocalHostNameForNullOrWild on systems where hostname HADOOP-9431 TestSecurityUtil#testLocalHostNameForNullOrWild on systems where hostname
contains capital letters (Chris Nauroth via sanjay) contains capital letters (Chris Nauroth via sanjay)
HADOOP-9261 S3n filesystem can move a directory under itself -and so lose data
(fixed in HADOOP-9258) (stevel)
HADOOP-9265 S3 blockstore filesystem breaks part of the Filesystem contract
(fixed in HADOOP-9258) (stevel)
HADOOP-9433 TestLocalFileSystem#testHasFileDescriptor leaks file handle
(Chris Nauroth via sanjay)
OPTIMIZATIONS OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd) HADOOP-7761. Improve the performance of raw comparisons. (todd)
@ -505,6 +511,9 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9283. Add support for running the Hadoop client on AIX. (atm) HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
HADOOP-8415. Add getDouble() and setDouble() in
org.apache.hadoop.conf.Configuration (Jan van der Lugt via harsh)
IMPROVEMENTS IMPROVEMENTS
HADOOP-9253. Capture ulimit info in the logs at service start time. HADOOP-9253. Capture ulimit info in the logs at service start time.
@ -525,8 +534,13 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9318. When exiting on a signal, print the signal name first. (Colin HADOOP-9318. When exiting on a signal, print the signal name first. (Colin
Patrick McCabe via atm) Patrick McCabe via atm)
HADOOP-9358. "Auth failed" log should include exception string (todd)
OPTIMIZATIONS OPTIMIZATIONS
HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs
(todd)
BUG FIXES BUG FIXES
HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh) HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
@ -585,6 +599,14 @@ Release 2.0.5-beta - UNRELEASED
HADOOP-9299. kerberos name resolution is kicking in even when kerberos HADOOP-9299. kerberos name resolution is kicking in even when kerberos
is not configured (daryn) is not configured (daryn)
HADOOP-9430. TestSSLFactory fails on IBM JVM. (Amir Sanjar via suresh)
HADOOP-9125. LdapGroupsMapping threw CommunicationException after some
idle time. (Kai Zheng via atm)
HADOOP-9357. Fallback to default authority if not specified in FileContext.
(Andrew Wang via eli)
Release 2.0.4-alpha - UNRELEASED Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -605,6 +627,9 @@ Release 2.0.4-alpha - UNRELEASED
HADOOP-9408. misleading description for net.topology.table.file.name HADOOP-9408. misleading description for net.topology.table.file.name
property in core-default.xml. (rajeshbabu via suresh) property in core-default.xml. (rajeshbabu via suresh)
HADOOP-9444. Modify hadoop-policy.xml to replace unexpanded variables to a
default value of '*'. (Roman Shaposhnik via vinodkv)
Release 2.0.3-alpha - 2013-02-06 Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -77,7 +77,7 @@
<property> <property>
<name>security.admin.operations.protocol.acl</name> <name>security.admin.operations.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value> <value>*</value>
<description>ACL for AdminOperationsProtocol. Used for admin commands. <description>ACL for AdminOperationsProtocol. Used for admin commands.
The ACL is a comma-separated list of user and group names. The user and The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel". group list is separated by a blank. For e.g. "alice,bob users,wheel".
@ -86,7 +86,7 @@
<property> <property>
<name>security.refresh.usertogroups.mappings.protocol.acl</name> <name>security.refresh.usertogroups.mappings.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value> <value>*</value>
<description>ACL for RefreshUserMappingsProtocol. Used to refresh <description>ACL for RefreshUserMappingsProtocol. Used to refresh
users mappings. The ACL is a comma-separated list of user and users mappings. The ACL is a comma-separated list of user and
group names. The user and group list is separated by a blank. For group names. The user and group list is separated by a blank. For
@ -96,7 +96,7 @@
<property> <property>
<name>security.refresh.policy.protocol.acl</name> <name>security.refresh.policy.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value> <value>*</value>
<description>ACL for RefreshAuthorizationPolicyProtocol, used by the <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
dfsadmin and mradmin commands to refresh the security policy in-effect. dfsadmin and mradmin commands to refresh the security policy in-effect.
The ACL is a comma-separated list of user and group names. The user and The ACL is a comma-separated list of user and group names. The user and
@ -120,7 +120,7 @@
<property> <property>
<name>security.qjournal.service.protocol.acl</name> <name>security.qjournal.service.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value> <value>*</value>
<description>ACL for QJournalProtocol, used by the NN to communicate with <description>ACL for QJournalProtocol, used by the NN to communicate with
JNs when using the QuorumJournalManager for edit logs.</description> JNs when using the QuorumJournalManager for edit logs.</description>
</property> </property>
@ -139,7 +139,7 @@
<property> <property>
<name>security.resourcetracker.protocol.acl</name> <name>security.resourcetracker.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value> <value>*</value>
<description>ACL for ResourceTracker protocol, used by the <description>ACL for ResourceTracker protocol, used by the
ResourceManager and NodeManager to communicate with each other. ResourceManager and NodeManager to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and The ACL is a comma-separated list of user and group names. The user and
@ -149,7 +149,7 @@
<property> <property>
<name>security.admin.protocol.acl</name> <name>security.admin.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value> <value>*</value>
<description>ACL for RMAdminProtocol, for admin commands. <description>ACL for RMAdminProtocol, for admin commands.
The ACL is a comma-separated list of user and group names. The user and The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel". group list is separated by a blank. For e.g. "alice,bob users,wheel".

View File

@ -244,17 +244,33 @@ public final class FileContext {
} }
/* /*
* Remove relative part - return "absolute": * Resolve a relative path passed from the user.
* If input is relative path ("foo/bar") add wd: ie "/<workingDir>/foo/bar" *
* A fully qualified uri ("hdfs://nn:p/foo/bar") or a slash-relative path * Relative paths are resolved against the current working directory
* (e.g. "foo/bar" becomes "/<workingDir>/foo/bar").
* Fully-qualified URIs (e.g. "hdfs://nn:p/foo/bar") and slash-relative paths
* ("/foo/bar") are returned unchanged. * ("/foo/bar") are returned unchanged.
* *
* Additionally, we fix malformed URIs that specify a scheme but not an
* authority (e.g. "hdfs:///foo/bar"). Per RFC 2396, we remove the scheme
* if it matches the default FS, and let the default FS add in the default
* scheme and authority later (see {@link AbstractFileSystem#checkPath}).
*
* Applications that use FileContext should use #makeQualified() since * Applications that use FileContext should use #makeQualified() since
* they really want a fully qualified URI. * they really want a fully-qualified URI.
* Hence this method is not called makeAbsolute() and * Hence this method is not called makeAbsolute() and
* has been deliberately declared private. * has been deliberately declared private.
*/ */
private Path fixRelativePart(Path p) { private Path fixRelativePart(Path p) {
// Per RFC 2396 5.2, drop schema if there is a scheme but no authority.
if (p.hasSchemeAndNoAuthority()) {
String scheme = p.toUri().getScheme();
if (scheme.equalsIgnoreCase(defaultFS.getUri().getScheme())) {
p = new Path(p.toUri().getSchemeSpecificPart());
}
}
// Absolute paths are unchanged. Relative paths are resolved against the
// current working directory.
if (p.isUriPathAbsolute()) { if (p.isUriPathAbsolute()) {
return p; return p;
} else { } else {
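
To make the scheme-dropping rule in fixRelativePart concrete, here is a small standalone sketch (not part of the patch) of the same RFC 2396 idea using plain java.net.URI: a URI with a scheme but no authority is reduced to its path when the scheme matches the default filesystem, so the default FS can re-qualify it later. The default FS URI below is invented for the example, and the real code works on Path objects via getSchemeSpecificPart() rather than getPath().

import java.net.URI;

public class FixRelativePartSketch {
  // Invented stand-in for FileContext's defaultFS.getUri().
  private static final URI DEFAULT_FS = URI.create("hdfs://nn.example.com:8020/");

  // Drop the scheme when there is a scheme but no authority and the scheme
  // matches the default FS, per RFC 2396 section 5.2.
  static String dropSchemeIfMatchesDefault(String pathString) {
    URI u = URI.create(pathString);
    if (u.getScheme() != null && u.getAuthority() == null
        && u.getScheme().equalsIgnoreCase(DEFAULT_FS.getScheme())) {
      return u.getPath();   // simplified; the patch keeps the scheme-specific part
    }
    return pathString;
  }

  public static void main(String[] args) {
    System.out.println(dropSchemeIfMatchesDefault("hdfs:///foo/bar"));        // "/foo/bar"
    System.out.println(dropSchemeIfMatchesDefault("hdfs://nn:8020/foo/bar")); // unchanged
    System.out.println(dropSchemeIfMatchesDefault("foo/bar"));                // unchanged
  }
}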

View File

@ -21,6 +21,7 @@ import java.io.Closeable;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
@ -211,12 +212,46 @@ public abstract class FileSystem extends Configured implements Closeable {
public abstract URI getUri(); public abstract URI getUri();
/** /**
* Resolve the uri's hostname and add the default port if not in the uri * Return a canonicalized form of this FileSystem's URI.
*
* The default implementation simply calls {@link #canonicalizeUri(URI)}
* on the filesystem's own URI, so subclasses typically only need to
* implement that method.
*
* @see #canonicalizeUri(URI)
*/
protected URI getCanonicalUri() {
return canonicalizeUri(getUri());
}
/**
* Canonicalize the given URI.
*
* This is filesystem-dependent, but may for example consist of
* canonicalizing the hostname using DNS and adding the default
* port if not specified.
*
* The default implementation simply fills in the default port if
* not specified and if the filesystem has a default port.
*
* @return URI * @return URI
* @see NetUtils#getCanonicalUri(URI, int) * @see NetUtils#getCanonicalUri(URI, int)
*/ */
protected URI getCanonicalUri() { protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(getUri(), getDefaultPort()); if (uri.getPort() == -1 && getDefaultPort() > 0) {
// reconstruct the uri with the default port set
try {
uri = new URI(uri.getScheme(), uri.getUserInfo(),
uri.getHost(), getDefaultPort(),
uri.getPath(), uri.getQuery(), uri.getFragment());
} catch (URISyntaxException e) {
// Should never happen!
throw new AssertionError("Valid URI became unparseable: " +
uri);
}
}
return uri;
} }
/** /**
@ -581,7 +616,7 @@ public abstract class FileSystem extends Configured implements Closeable {
} }
if (uri != null) { if (uri != null) {
// canonicalize uri before comparing with this fs // canonicalize uri before comparing with this fs
uri = NetUtils.getCanonicalUri(uri, getDefaultPort()); uri = canonicalizeUri(uri);
thatAuthority = uri.getAuthority(); thatAuthority = uri.getAuthority();
if (thisAuthority == thatAuthority || // authorities match if (thisAuthority == thatAuthority || // authorities match
(thisAuthority != null && (thisAuthority != null &&
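
With getCanonicalUri() now delegating to canonicalizeUri(URI), the base class only fills in a missing default port; anything heavier, such as DNS canonicalization, is left to subclasses (see the TestFileSystemCanonicalization override later in this commit). That split avoids the unnecessary DNS resolution attempts that HADOOP-9150 in the CHANGES above calls out. A minimal standalone sketch of the default behaviour, with a made-up default port:

import java.net.URI;
import java.net.URISyntaxException;

public class CanonicalizeUriSketch {
  // Invented default port; a real FileSystem supplies this via getDefaultPort().
  private static final int DEFAULT_PORT = 8020;

  // Mirrors the new default FileSystem#canonicalizeUri: only fill in the port.
  static URI canonicalize(URI uri) {
    if (uri.getPort() == -1 && DEFAULT_PORT > 0) {
      try {
        return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
            DEFAULT_PORT, uri.getPath(), uri.getQuery(), uri.getFragment());
      } catch (URISyntaxException e) {
        throw new AssertionError("Valid URI became unparseable: " + uri);
      }
    }
    return uri;
  }

  public static void main(String[] args) {
    System.out.println(canonicalize(URI.create("hdfs://nn/foo")));      // hdfs://nn:8020/foo
    System.out.println(canonicalize(URI.create("hdfs://nn:9000/foo"))); // unchanged
  }
}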

View File

@ -96,15 +96,17 @@ public class FilterFileSystem extends FileSystem {
return fs.getUri(); return fs.getUri();
} }
/**
* Returns a qualified URI whose scheme and authority identify this
* FileSystem.
*/
@Override @Override
protected URI getCanonicalUri() { protected URI getCanonicalUri() {
return fs.getCanonicalUri(); return fs.getCanonicalUri();
} }
@Override
protected URI canonicalizeUri(URI uri) {
return fs.canonicalizeUri(uri);
}
/** Make sure that a path specifies a FileSystem. */ /** Make sure that a path specifies a FileSystem. */
@Override @Override
public Path makeQualified(Path path) { public Path makeQualified(Path path) {

View File

@ -257,6 +257,10 @@ public class Path implements Comparable {
uri.getScheme() == null && uri.getAuthority() == null); uri.getScheme() == null && uri.getAuthority() == null);
} }
public boolean hasSchemeAndNoAuthority() {
return uri.getScheme() != null && uri.getAuthority() == null;
}
/** /**
* True if the path component (i.e. directory) of this URI is absolute. * True if the path component (i.e. directory) of this URI is absolute.
*/ */
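
As a quick illustration of the new hasSchemeAndNoAuthority() predicate, the sketch below shows which kinds of paths it is expected to match; it assumes only org.apache.hadoop.fs.Path from this codebase.

import org.apache.hadoop.fs.Path;

public class HasSchemeAndNoAuthoritySketch {
  public static void main(String[] args) {
    // scheme present, authority missing: the case fixRelativePart now repairs
    System.out.println(new Path("hdfs:///foo/bar").hasSchemeAndNoAuthority());        // expected: true
    // scheme and authority both present
    System.out.println(new Path("hdfs://nn:8020/foo/bar").hasSchemeAndNoAuthority()); // expected: false
    // no scheme at all
    System.out.println(new Path("/foo/bar").hasSchemeAndNoAuthority());               // expected: false
  }
}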

View File

@ -137,10 +137,16 @@ class Jets3tFileSystemStore implements FileSystemStore {
@Override @Override
public boolean inodeExists(Path path) throws IOException { public boolean inodeExists(Path path) throws IOException {
InputStream in = get(pathToKey(path), true); String key = pathToKey(path);
InputStream in = get(key, true);
if (in == null) { if (in == null) {
if (isRoot(key)) {
storeINode(path, INode.DIRECTORY_INODE);
return true;
} else {
return false; return false;
} }
}
in.close(); in.close();
return true; return true;
} }
@ -211,7 +217,13 @@ class Jets3tFileSystemStore implements FileSystemStore {
@Override @Override
public INode retrieveINode(Path path) throws IOException { public INode retrieveINode(Path path) throws IOException {
return INode.deserialize(get(pathToKey(path), true)); String key = pathToKey(path);
InputStream in = get(key, true);
if (in == null && isRoot(key)) {
storeINode(path, INode.DIRECTORY_INODE);
return INode.DIRECTORY_INODE;
}
return INode.deserialize(in);
} }
@Override @Override
@ -366,6 +378,10 @@ class Jets3tFileSystemStore implements FileSystemStore {
return blockToKey(block.getId()); return blockToKey(block.getId());
} }
private boolean isRoot(String key) {
return key.isEmpty() || key.equals("/");
}
@Override @Override
public void purge() throws IOException { public void purge() throws IOException {
try { try {

View File

@ -252,29 +252,122 @@ public class S3FileSystem extends FileSystem {
@Override @Override
public boolean rename(Path src, Path dst) throws IOException { public boolean rename(Path src, Path dst) throws IOException {
Path absoluteSrc = makeAbsolute(src); Path absoluteSrc = makeAbsolute(src);
final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
INode srcINode = store.retrieveINode(absoluteSrc); INode srcINode = store.retrieveINode(absoluteSrc);
boolean debugEnabled = LOG.isDebugEnabled();
if (srcINode == null) { if (srcINode == null) {
// src path doesn't exist // src path doesn't exist
if (debugEnabled) {
LOG.debug(debugPreamble + "returning false as src does not exist");
}
return false; return false;
} }
Path absoluteDst = makeAbsolute(dst); Path absoluteDst = makeAbsolute(dst);
INode dstINode = store.retrieveINode(absoluteDst);
if (dstINode != null && dstINode.isDirectory()) { //validate the parent dir of the destination
absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
dstINode = store.retrieveINode(absoluteDst);
}
if (dstINode != null) {
// dst path already exists - can't overwrite
return false;
}
Path dstParent = absoluteDst.getParent(); Path dstParent = absoluteDst.getParent();
if (dstParent != null) { if (dstParent != null) {
//if the dst parent is not root, make sure it exists
INode dstParentINode = store.retrieveINode(dstParent); INode dstParentINode = store.retrieveINode(dstParent);
if (dstParentINode == null || dstParentINode.isFile()) { if (dstParentINode == null) {
// dst parent doesn't exist or is a file // dst parent doesn't exist
if (debugEnabled) {
LOG.debug(debugPreamble +
"returning false as dst parent does not exist");
}
return false;
}
if (dstParentINode.isFile()) {
// dst parent exists but is a file
if (debugEnabled) {
LOG.debug(debugPreamble +
"returning false as dst parent exists and is a file");
}
return false; return false;
} }
} }
//get status of source
boolean srcIsFile = srcINode.isFile();
INode dstINode = store.retrieveINode(absoluteDst);
boolean destExists = dstINode != null;
boolean destIsDir = destExists && !dstINode.isFile();
if (srcIsFile) {
//source is a simple file
if (destExists) {
if (destIsDir) {
//outcome #1 dest exists and is dir -filename to subdir of dest
if (debugEnabled) {
LOG.debug(debugPreamble +
"copying src file under dest dir to " + absoluteDst);
}
absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
} else {
//outcome #2 dest is a file: fail iff it differs from src
boolean renamingOnToSelf = absoluteSrc.equals(absoluteDst);
if (debugEnabled) {
LOG.debug(debugPreamble +
"copying file onto file, outcome is " + renamingOnToSelf);
}
return renamingOnToSelf;
}
} else {
// #3 dest does not exist: use dest as path for rename
if (debugEnabled) {
LOG.debug(debugPreamble +
"copying file onto file");
}
}
} else {
//here the source exists and is a directory
// outcomes (given we know the parent dir exists if we get this far)
// #1 destination is a file: fail
// #2 destination is a directory: create a new dir under that one
// #3 destination doesn't exist: create a new dir with that name
// #2 and #3 are only allowed if the dest path is not == or under src
if (destExists) {
if (!destIsDir) {
// #1 destination is a file: fail
if (debugEnabled) {
LOG.debug(debugPreamble +
"returning false as src is a directory, but not dest");
}
return false;
} else {
// the destination dir exists
// destination for rename becomes a subdir of the target name
absoluteDst = new Path(absoluteDst, absoluteSrc.getName());
if (debugEnabled) {
LOG.debug(debugPreamble +
"copying src dir under dest dir to " + absoluteDst);
}
}
}
//the final destination directory is now known, so validate it for
//illegal moves
if (absoluteSrc.equals(absoluteDst)) {
//you can't rename a directory onto itself
if (debugEnabled) {
LOG.debug(debugPreamble +
"Dest==source && isDir -failing");
}
return false;
}
if (absoluteDst.toString().startsWith(absoluteSrc.toString() + "/")) {
//you can't move a directory under itself
if (debugEnabled) {
LOG.debug(debugPreamble +
"dst is equal to or under src dir -failing");
}
return false;
}
}
//here the dest path is set up -so rename
return renameRecursive(absoluteSrc, absoluteDst); return renameRecursive(absoluteSrc, absoluteDst);
} }
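
The outcome rules spelled out in the comments above can be restated as a small pure function. The sketch below is an illustrative summary of that logic, not code from the patch; booleans stand in for the INode lookups.

public class RenameOutcomeSketch {
  // Returns the resolved destination, or null where rename() must return false.
  static String resolveDestination(String src, boolean srcIsFile, String srcName,
      String dst, boolean dstExists, boolean dstIsDir) {
    if (dstExists && dstIsDir) {
      // an existing directory: the source moves under it, keeping its name
      dst = dst + "/" + srcName;
    } else if (dstExists) {
      // an existing file: only a file renamed onto itself is a no-op success
      return (srcIsFile && src.equals(dst)) ? dst : null;
    }
    if (!srcIsFile) {
      // a directory may not be renamed onto itself or under itself
      if (dst.equals(src) || dst.startsWith(src + "/")) {
        return null;
      }
    }
    return dst;
  }

  public static void main(String[] args) {
    System.out.println(resolveDestination("/a/b", false, "b", "/a/c", true, true));     // /a/c/b
    System.out.println(resolveDestination("/a/b", false, "b", "/a/b/c", false, false)); // null
    System.out.println(resolveDestination("/a/f", true, "f", "/a/f", true, false));     // /a/f
  }
}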

View File

@ -582,35 +582,58 @@ public class NativeS3FileSystem extends FileSystem {
public boolean rename(Path src, Path dst) throws IOException { public boolean rename(Path src, Path dst) throws IOException {
String srcKey = pathToKey(makeAbsolute(src)); String srcKey = pathToKey(makeAbsolute(src));
final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - ";
if (srcKey.length() == 0) { if (srcKey.length() == 0) {
// Cannot rename root of file system // Cannot rename root of file system
if (LOG.isDebugEnabled()) {
LOG.debug(debugPreamble +
"returning false as cannot rename the root of a filesystem");
}
return false; return false;
} }
final String debugPreamble = "Renaming '" + src + "' to '" + dst + "' - "; //get status of source
boolean srcIsFile;
try {
srcIsFile = getFileStatus(src).isFile();
} catch (FileNotFoundException e) {
//bail out fast if the source does not exist
if (LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "returning false as src does not exist");
}
return false;
}
// Figure out the final destination // Figure out the final destination
String dstKey; String dstKey = pathToKey(makeAbsolute(dst));
try { try {
boolean dstIsFile = getFileStatus(dst).isFile(); boolean dstIsFile = getFileStatus(dst).isFile();
if (dstIsFile) { if (dstIsFile) {
//destination is a file.
//you can't copy a file or a directory onto an existing file
//except for the special case of dest==src, which is a no-op
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + LOG.debug(debugPreamble +
"returning false as dst is an already existing file"); "returning without rename as dst is an already existing file");
} }
return false; //exit, returning true iff the rename is onto self
return srcKey.equals(dstKey);
} else { } else {
//destination exists and is a directory
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "using dst as output directory"); LOG.debug(debugPreamble + "using dst as output directory");
} }
//destination goes under the dst path, with the name of the
//source entry
dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName()))); dstKey = pathToKey(makeAbsolute(new Path(dst, src.getName())));
} }
} catch (FileNotFoundException e) { } catch (FileNotFoundException e) {
//destination does not exist => the source file or directory
//is copied over with the name of the destination
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "using dst as output destination"); LOG.debug(debugPreamble + "using dst as output destination");
} }
dstKey = pathToKey(makeAbsolute(dst));
try { try {
if (getFileStatus(dst.getParent()).isFile()) { if (getFileStatus(dst.getParent()).isFile()) {
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
@ -628,16 +651,17 @@ public class NativeS3FileSystem extends FileSystem {
} }
} }
boolean srcIsFile; //rename to self behavior follows Posix rules and is different
try { //for directories and files -the return code is driven by src type
srcIsFile = getFileStatus(src).isFile(); if (srcKey.equals(dstKey)) {
} catch (FileNotFoundException e) { //fully resolved destination key matches source: fail
if(LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "returning false as src does not exist"); LOG.debug(debugPreamble + "renamingToSelf; returning true");
} }
return false; return true;
} }
if (srcIsFile) { if (srcIsFile) {
//source is a file; COPY then DELETE
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + LOG.debug(debugPreamble +
"src is file, so doing copy then delete in S3"); "src is file, so doing copy then delete in S3");
@ -645,9 +669,19 @@ public class NativeS3FileSystem extends FileSystem {
store.copy(srcKey, dstKey); store.copy(srcKey, dstKey);
store.delete(srcKey); store.delete(srcKey);
} else { } else {
//src is a directory
if(LOG.isDebugEnabled()) { if(LOG.isDebugEnabled()) {
LOG.debug(debugPreamble + "src is directory, so copying contents"); LOG.debug(debugPreamble + "src is directory, so copying contents");
} }
//Verify dest is not a child of the source directory
if (dstKey.startsWith(srcKey + "/")) {
if (LOG.isDebugEnabled()) {
LOG.debug(
debugPreamble + "cannot rename a directory to a subdirectory of self");
}
return false;
}
//create the subdir under the destination
store.storeEmptyFile(dstKey + FOLDER_SUFFIX); store.storeEmptyFile(dstKey + FOLDER_SUFFIX);
List<String> keysToDelete = new ArrayList<String>(); List<String> keysToDelete = new ArrayList<String>();

View File

@ -257,6 +257,7 @@ public class Client {
private final ConnectionId remoteId; // connection id private final ConnectionId remoteId; // connection id
private AuthMethod authMethod; // authentication method private AuthMethod authMethod; // authentication method
private Token<? extends TokenIdentifier> token; private Token<? extends TokenIdentifier> token;
private int serviceClass;
private SaslRpcClient saslRpcClient; private SaslRpcClient saslRpcClient;
private Socket socket = null; // connected socket private Socket socket = null; // connected socket
@ -279,7 +280,7 @@ public class Client {
private final Object sendRpcRequestLock = new Object(); private final Object sendRpcRequestLock = new Object();
public Connection(ConnectionId remoteId) throws IOException { public Connection(ConnectionId remoteId, int serviceClass) throws IOException {
this.remoteId = remoteId; this.remoteId = remoteId;
this.server = remoteId.getAddress(); this.server = remoteId.getAddress();
if (server.isUnresolved()) { if (server.isUnresolved()) {
@ -296,6 +297,7 @@ public class Client {
this.tcpNoDelay = remoteId.getTcpNoDelay(); this.tcpNoDelay = remoteId.getTcpNoDelay();
this.doPing = remoteId.getDoPing(); this.doPing = remoteId.getDoPing();
this.pingInterval = remoteId.getPingInterval(); this.pingInterval = remoteId.getPingInterval();
this.serviceClass = serviceClass;
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("The ping interval is " + this.pingInterval + " ms."); LOG.debug("The ping interval is " + this.pingInterval + " ms.");
} }
@ -747,7 +749,9 @@ public class Client {
* +----------------------------------+ * +----------------------------------+
* | "hrpc" 4 bytes | * | "hrpc" 4 bytes |
* +----------------------------------+ * +----------------------------------+
* | Version (1 bytes) | * | Version (1 byte) |
* +----------------------------------+
* | Service Class (1 byte) |
* +----------------------------------+ * +----------------------------------+
* | Authmethod (1 byte) | * | Authmethod (1 byte) |
* +----------------------------------+ * +----------------------------------+
@ -760,6 +764,7 @@ public class Client {
// Write out the header, version and authentication method // Write out the header, version and authentication method
out.write(Server.HEADER.array()); out.write(Server.HEADER.array());
out.write(Server.CURRENT_VERSION); out.write(Server.CURRENT_VERSION);
out.write(serviceClass);
authMethod.write(out); authMethod.write(out);
Server.IpcSerializationType.PROTOBUF.write(out); Server.IpcSerializationType.PROTOBUF.write(out);
out.flush(); out.flush();
@ -1192,6 +1197,20 @@ public class Client {
return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId); return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
} }
/**
* Same as {@link #call(Writable, InetSocketAddress,
* Class, UserGroupInformation, int, Configuration)}
* except that a service class can be specified.
*/
public Writable call(Writable param, InetSocketAddress addr,
Class<?> protocol, UserGroupInformation ticket,
int rpcTimeout, int serviceClass, Configuration conf)
throws InterruptedException, IOException {
ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
ticket, rpcTimeout, conf);
return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass);
}
/** /**
* Make a call, passing <code>param</code>, to the IPC server running at * Make a call, passing <code>param</code>, to the IPC server running at
* <code>address</code> which is servicing the <code>protocol</code> protocol, * <code>address</code> which is servicing the <code>protocol</code> protocol,
@ -1231,8 +1250,26 @@ public class Client {
*/ */
public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest, public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
ConnectionId remoteId) throws InterruptedException, IOException { ConnectionId remoteId) throws InterruptedException, IOException {
return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT);
}
/**
* Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
* <code>remoteId</code>, returning the rpc response.
*
* @param rpcKind
* @param rpcRequest - contains serialized method and method parameters
* @param remoteId - the target rpc server
* @param serviceClass - service class for RPC
* @return the rpc response
* Throws exceptions if there are network problems or if the remote code
* threw an exception.
*/
public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
ConnectionId remoteId, int serviceClass)
throws InterruptedException, IOException {
Call call = new Call(rpcKind, rpcRequest); Call call = new Call(rpcKind, rpcRequest);
Connection connection = getConnection(remoteId, call); Connection connection = getConnection(remoteId, call, serviceClass);
try { try {
connection.sendRpcRequest(call); // send the rpc request connection.sendRpcRequest(call); // send the rpc request
} catch (RejectedExecutionException e) { } catch (RejectedExecutionException e) {
@ -1289,7 +1326,7 @@ public class Client {
/** Get a connection from the pool, or create a new one and add it to the /** Get a connection from the pool, or create a new one and add it to the
* pool. Connections to a given ConnectionId are reused. */ * pool. Connections to a given ConnectionId are reused. */
private Connection getConnection(ConnectionId remoteId, private Connection getConnection(ConnectionId remoteId,
Call call) Call call, int serviceClass)
throws IOException, InterruptedException { throws IOException, InterruptedException {
if (!running.get()) { if (!running.get()) {
// the client is stopped // the client is stopped
@ -1304,7 +1341,7 @@ public class Client {
synchronized (connections) { synchronized (connections) {
connection = connections.get(remoteId); connection = connections.get(remoteId);
if (connection == null) { if (connection == null) {
connection = new Connection(remoteId); connection = new Connection(remoteId, serviceClass);
connections.put(remoteId, connection); connections.put(remoteId, connection);
} }
} }
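
A usage sketch for the new serviceClass-aware overload, along the lines of what TestIPC does later in this commit; the address, payload and service class value here are placeholders.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Client;

public class ServiceClassCallSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    InetSocketAddress addr = new InetSocketAddress("localhost", 12345); // placeholder server
    int serviceClass = 1; // carried as the second byte of the connection header

    Client client = new Client(LongWritable.class, conf);
    try {
      // Same as the older call() overloads, plus the service class.
      Writable reply = client.call(new LongWritable(42L), addr,
          null /* protocol */, null /* ticket */, 0 /* rpcTimeout */,
          serviceClass, conf);
      System.out.println("got " + reply);
    } finally {
      client.stop();
    }
  }
}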

View File

@ -77,12 +77,12 @@ import com.google.protobuf.BlockingService;
@InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", "Yarn" }) @InterfaceAudience.LimitedPrivate(value = { "Common", "HDFS", "MapReduce", "Yarn" })
@InterfaceStability.Evolving @InterfaceStability.Evolving
public class RPC { public class RPC {
final static int RPC_SERVICE_CLASS_DEFAULT = 0;
public enum RpcKind { public enum RpcKind {
RPC_BUILTIN ((short) 1), // Used for built in calls by tests RPC_BUILTIN ((short) 1), // Used for built in calls by tests
RPC_WRITABLE ((short) 2), // Use WritableRpcEngine RPC_WRITABLE ((short) 2), // Use WritableRpcEngine
RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
private static final short FIRST_INDEX = RPC_BUILTIN.value;
public final short value; //TODO make it private public final short value; //TODO make it private
RpcKind(short val) { RpcKind(short val) {

View File

@ -438,6 +438,11 @@ public abstract class Server {
return Arrays.asList(handlers); return Arrays.asList(handlers);
} }
@VisibleForTesting
List<Connection> getConnections() {
return connectionList;
}
/** /**
* Refresh the service authorization ACL for the service handled by this server. * Refresh the service authorization ACL for the service handled by this server.
*/ */
@ -1104,6 +1109,7 @@ public abstract class Server {
private ByteBuffer connectionHeaderBuf = null; private ByteBuffer connectionHeaderBuf = null;
private ByteBuffer unwrappedData; private ByteBuffer unwrappedData;
private ByteBuffer unwrappedDataLengthBuffer; private ByteBuffer unwrappedDataLengthBuffer;
private int serviceClass;
UserGroupInformation user = null; UserGroupInformation user = null;
public UserGroupInformation attemptingUser = null; // user name before auth public UserGroupInformation attemptingUser = null; // user name before auth
@ -1231,7 +1237,8 @@ public abstract class Server {
rpcMetrics.incrAuthenticationFailures(); rpcMetrics.incrAuthenticationFailures();
String clientIP = this.toString(); String clientIP = this.toString();
// attempting user could be null // attempting user could be null
AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser); AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser +
" (" + e.getLocalizedMessage() + ")");
throw e; throw e;
} }
if (saslServer.isComplete() && replyToken == null) { if (saslServer.isComplete() && replyToken == null) {
@ -1314,14 +1321,17 @@ public abstract class Server {
if (!connectionHeaderRead) { if (!connectionHeaderRead) {
//Every connection is expected to send the header. //Every connection is expected to send the header.
if (connectionHeaderBuf == null) { if (connectionHeaderBuf == null) {
connectionHeaderBuf = ByteBuffer.allocate(3); connectionHeaderBuf = ByteBuffer.allocate(4);
} }
count = channelRead(channel, connectionHeaderBuf); count = channelRead(channel, connectionHeaderBuf);
if (count < 0 || connectionHeaderBuf.remaining() > 0) { if (count < 0 || connectionHeaderBuf.remaining() > 0) {
return count; return count;
} }
int version = connectionHeaderBuf.get(0); int version = connectionHeaderBuf.get(0);
byte[] method = new byte[] {connectionHeaderBuf.get(1)}; // TODO we should add handler for service class later
this.setServiceClass(connectionHeaderBuf.get(1));
byte[] method = new byte[] {connectionHeaderBuf.get(2)};
authMethod = AuthMethod.read(new DataInputStream( authMethod = AuthMethod.read(new DataInputStream(
new ByteArrayInputStream(method))); new ByteArrayInputStream(method)));
dataLengthBuffer.flip(); dataLengthBuffer.flip();
@ -1345,7 +1355,7 @@ public abstract class Server {
} }
IpcSerializationType serializationType = IpcSerializationType IpcSerializationType serializationType = IpcSerializationType
.fromByte(connectionHeaderBuf.get(2)); .fromByte(connectionHeaderBuf.get(3));
if (serializationType != IpcSerializationType.PROTOBUF) { if (serializationType != IpcSerializationType.PROTOBUF) {
respondUnsupportedSerialization(serializationType); respondUnsupportedSerialization(serializationType);
return -1; return -1;
@ -1735,6 +1745,22 @@ public abstract class Server {
return true; return true;
} }
/**
* Get service class for connection
* @return the serviceClass
*/
public int getServiceClass() {
return serviceClass;
}
/**
* Set service class for connection
* @param serviceClass the serviceClass to set
*/
public void setServiceClass(int serviceClass) {
this.serviceClass = serviceClass;
}
private synchronized void close() throws IOException { private synchronized void close() throws IOException {
disposeSasl(); disposeSasl();
data = null; data = null;
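
Putting the client-side header diagram and the server-side read together: after the 4-byte "hrpc" magic the connection header is now four bytes, in the order version, service class, auth method, serialization type. The standalone sketch below lays those bytes out and picks them apart the same way the server does; the concrete byte values are fabricated stand-ins for Server.CURRENT_VERSION, the chosen service class, AuthMethod and IpcSerializationType.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ConnectionHeaderSketch {
  public static void main(String[] args) throws IOException {
    byte version = 8;        // stand-in for Server.CURRENT_VERSION
    byte serviceClass = 1;   // the byte added by this change
    byte authMethod = 80;    // stand-in for an AuthMethod code
    byte serialization = 0;  // stand-in for IpcSerializationType.PROTOBUF

    // What the client writes.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    out.write("hrpc".getBytes(StandardCharsets.UTF_8)); // 4-byte magic
    out.write(version);
    out.write(serviceClass);
    out.write(authMethod);
    out.write(serialization);

    // What the server reads: the 4-byte magic, then a 4-byte header buffer.
    ByteBuffer buf = ByteBuffer.wrap(out.toByteArray());
    byte[] magic = new byte[4];
    buf.get(magic);
    ByteBuffer header = buf.slice(); // the connectionHeaderBuf of the patch
    System.out.println("version=" + header.get(0)
        + " serviceClass=" + header.get(1)
        + " authMethod=" + header.get(2)
        + " serialization=" + header.get(3));
  }
}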

View File

@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Hashtable; import java.util.Hashtable;
import java.util.List; import java.util.List;
import javax.naming.CommunicationException;
import javax.naming.Context; import javax.naming.Context;
import javax.naming.NamingEnumeration; import javax.naming.NamingEnumeration;
import javax.naming.NamingException; import javax.naming.NamingException;
@ -166,6 +167,8 @@ public class LdapGroupsMapping
private String groupMemberAttr; private String groupMemberAttr;
private String groupNameAttr; private String groupNameAttr;
public static int RECONNECT_RETRY_COUNT = 3;
/** /**
* Returns list of groups for a user. * Returns list of groups for a user.
* *
@ -178,9 +181,42 @@ public class LdapGroupsMapping
*/ */
@Override @Override
public synchronized List<String> getGroups(String user) throws IOException { public synchronized List<String> getGroups(String user) throws IOException {
List<String> groups = new ArrayList<String>(); List<String> emptyResults = new ArrayList<String>();
/*
* Normal garbage collection takes care of removing Context instances when they are no longer in use.
* Connections used by Context instances being garbage collected will be closed automatically.
* So if the connection has been closed and we get a CommunicationException, retry a few times with a new DirContext/connection.
*/
try {
return doGetGroups(user);
} catch (CommunicationException e) {
LOG.warn("Connection is closed, will try to reconnect");
} catch (NamingException e) {
LOG.warn("Exception trying to get groups for user " + user, e);
return emptyResults;
}
int retryCount = 0;
while (retryCount ++ < RECONNECT_RETRY_COUNT) {
//reset ctx so that new DirContext can be created with new connection
this.ctx = null;
try { try {
return doGetGroups(user);
} catch (CommunicationException e) {
LOG.warn("Connection being closed, reconnecting failed, retryCount = " + retryCount);
} catch (NamingException e) {
LOG.warn("Exception trying to get groups for user " + user, e);
return emptyResults;
}
}
return emptyResults;
}
List<String> doGetGroups(String user) throws NamingException {
List<String> groups = new ArrayList<String>();
DirContext ctx = getDirContext(); DirContext ctx = getDirContext();
// Search for the user. We'll only ever need to look at the first result // Search for the user. We'll only ever need to look at the first result
@ -203,10 +239,6 @@ public class LdapGroupsMapping
groups.add(groupName.get().toString()); groups.add(groupName.get().toString());
} }
} }
} catch (NamingException e) {
LOG.warn("Exception trying to get groups for user " + user, e);
return new ArrayList<String>();
}
return groups; return groups;
} }
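
The retry logic above boils down to a common pattern: try once, on a connection-level failure drop the cached context and retry a bounded number of times, and treat any other NamingException as an empty result. A generic restatement of that pattern (the interface and method names are invented for the sketch, not part of the patch):

import java.util.Collections;
import java.util.List;

import javax.naming.CommunicationException;
import javax.naming.NamingException;

public class RetryOnStaleConnectionSketch {
  interface LdapQuery {
    List<String> run() throws NamingException;
  }

  static final int RECONNECT_RETRY_COUNT = 3; // same bound as the patch

  static List<String> getWithRetry(LdapQuery query, Runnable resetConnection) {
    for (int attempt = 0; attempt <= RECONNECT_RETRY_COUNT; attempt++) {
      try {
        return query.run();
      } catch (CommunicationException e) {
        // stale/closed connection: drop it and try again with a fresh one
        resetConnection.run();
      } catch (NamingException e) {
        // any other LDAP problem: give up and return no groups
        return Collections.emptyList();
      }
    }
    return Collections.emptyList();
  }
}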

View File

@ -164,7 +164,9 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
} else { } else {
keystore.load(null, null); keystore.load(null, null);
} }
KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509"); KeyManagerFactory keyMgrFactory = KeyManagerFactory
.getInstance(SSLFactory.SSLCERTIFICATE);
keyMgrFactory.init(keystore, (keystorePassword != null) ? keyMgrFactory.init(keystore, (keystorePassword != null) ?
keystorePassword.toCharArray() : null); keystorePassword.toCharArray() : null);
keyManagers = keyMgrFactory.getKeyManagers(); keyManagers = keyMgrFactory.getKeyManagers();

View File

@ -170,7 +170,7 @@ public final class ReloadingX509TrustManager
} }
TrustManagerFactory trustManagerFactory = TrustManagerFactory trustManagerFactory =
TrustManagerFactory.getInstance("SunX509"); TrustManagerFactory.getInstance(SSLFactory.SSLCERTIFICATE);
trustManagerFactory.init(ks); trustManagerFactory.init(ks);
TrustManager[] trustManagers = trustManagerFactory.getTrustManagers(); TrustManager[] trustManagers = trustManagerFactory.getTrustManagers();
for (TrustManager trustManager1 : trustManagers) { for (TrustManager trustManager1 : trustManagers) {

View File

@ -58,6 +58,9 @@ public class SSLFactory implements ConnectionConfigurator {
"hadoop.ssl.client.conf"; "hadoop.ssl.client.conf";
public static final String SSL_SERVER_CONF_KEY = public static final String SSL_SERVER_CONF_KEY =
"hadoop.ssl.server.conf"; "hadoop.ssl.server.conf";
private static final boolean IBMJAVA =
System.getProperty("java.vendor").contains("IBM");
public static final String SSLCERTIFICATE = IBMJAVA?"ibmX509":"SunX509";
public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false; public static final boolean DEFAULT_SSL_REQUIRE_CLIENT_CERT = false;

View File

@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
import java.io.File; import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.EnumSet; import java.util.EnumSet;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
@ -1165,6 +1167,40 @@ public abstract class FileContextMainOperationsBaseTest {
} }
} }
/**
* Test that URIs with a scheme, no authority, and absolute path component
* resolve with the authority of the default FS.
*/
@Test(timeout=30000)
public void testAbsolutePathSchemeNoAuthority() throws IOException,
URISyntaxException {
Path file = getTestRootPath(fc, "test/file");
createFile(file);
URI uri = file.toUri();
URI noAuthorityUri = new URI(uri.getScheme(), null, uri.getPath(),
uri.getQuery(), uri.getFragment());
Path noAuthority = new Path(noAuthorityUri);
Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
}
/**
* Test that URIs with a scheme, no authority, and relative path component
* resolve with the authority of the default FS.
*/
@Test(timeout=30000)
public void testRelativePathSchemeNoAuthority() throws IOException,
URISyntaxException {
Path workDir = new Path(getAbsoluteTestRootPath(fc), new Path("test"));
fc.setWorkingDirectory(workDir);
Path file = new Path(workDir, "file");
createFile(file);
URI uri = file.toUri();
URI noAuthorityUri = new URI(uri.getScheme() + ":file");
System.out.println(noAuthorityUri);
Path noAuthority = new Path(noAuthorityUri);
Assert.assertEquals(fc.getFileStatus(file), fc.getFileStatus(noAuthority));
}
protected void createFile(Path path) throws IOException { protected void createFile(Path path) throws IOException {
FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE), FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
Options.CreateOpts.createParent()); Options.CreateOpts.createParent());

View File

@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.util.Locale;
import junit.framework.TestCase; import junit.framework.TestCase;
@ -51,8 +52,14 @@ public abstract class FileSystemContractBaseTest extends TestCase {
@Override @Override
protected void tearDown() throws Exception { protected void tearDown() throws Exception {
try {
if (fs != null) {
fs.delete(path("/test"), true); fs.delete(path("/test"), true);
} }
} catch (IOException e) {
LOG.error("Error deleting /test: " + e, e);
}
}
protected int getBlockSize() { protected int getBlockSize() {
return 1024; return 1024;
@ -62,10 +69,23 @@ public abstract class FileSystemContractBaseTest extends TestCase {
return "/user/" + System.getProperty("user.name"); return "/user/" + System.getProperty("user.name");
} }
/**
* Override this if the filesystem does not support rename
* @return true if the FS supports rename -and rename related tests
* should be run
*/
protected boolean renameSupported() { protected boolean renameSupported() {
return true; return true;
} }
/**
* Override this if the filesystem is not case sensitive
* @return true if the case detection/preservation tests should run
*/
protected boolean filesystemIsCaseSensitive() {
return true;
}
public void testFsStatus() throws Exception { public void testFsStatus() throws Exception {
FsStatus fsStatus = fs.getStatus(); FsStatus fsStatus = fs.getStatus();
assertNotNull(fsStatus); assertNotNull(fsStatus);
@ -109,6 +129,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
assertTrue(fs.mkdirs(testDir)); assertTrue(fs.mkdirs(testDir));
assertTrue(fs.exists(testDir)); assertTrue(fs.exists(testDir));
assertTrue("Should be a directory", fs.isDirectory(testDir));
assertFalse(fs.isFile(testDir)); assertFalse(fs.isFile(testDir));
Path parentDir = testDir.getParent(); Path parentDir = testDir.getParent();
@ -365,8 +386,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
Path dst = path("/test/new/newdir"); Path dst = path("/test/new/newdir");
fs.mkdirs(dst); fs.mkdirs(dst);
rename(src, dst, true, false, true); rename(src, dst, true, false, true);
assertTrue("Destination changed", assertIsFile(path("/test/new/newdir/file"));
fs.exists(path("/test/new/newdir/file")));
} }
public void testRenameDirectoryMoveToNonExistentDirectory() public void testRenameDirectoryMoveToNonExistentDirectory()
@ -466,9 +486,9 @@ public abstract class FileSystemContractBaseTest extends TestCase {
private void rename(Path src, Path dst, boolean renameSucceeded, private void rename(Path src, Path dst, boolean renameSucceeded,
boolean srcExists, boolean dstExists) throws IOException { boolean srcExists, boolean dstExists) throws IOException {
assertEquals("Rename result", renameSucceeded, fs.rename(src, dst)); assertEquals("mv " + src + " " + dst,renameSucceeded, fs.rename(src, dst));
assertEquals("Source exists", srcExists, fs.exists(src)); assertEquals("Source exists: " + src, srcExists, fs.exists(src));
assertEquals("Destination exists", dstExists, fs.exists(dst)); assertEquals("Destination exists" + dst, dstExists, fs.exists(dst));
} }
/** /**
@ -494,6 +514,253 @@ public abstract class FileSystemContractBaseTest extends TestCase {
writeAndRead(path, filedata2, blockSize * 2, true, false); writeAndRead(path, filedata2, blockSize * 2, true, false);
} }
/**
* Assert that a filesystem is case sensitive.
* This is done by creating a mixed-case filename and asserting that
* its lower case version is not there.
* @throws Exception
*/
public void testFilesystemIsCaseSensitive() throws Exception {
if (!filesystemIsCaseSensitive()) {
LOG.info("Skipping test");
return;
}
String mixedCaseFilename = "/test/UPPER.TXT";
Path upper = path(mixedCaseFilename);
Path lower = path(mixedCaseFilename.toLowerCase(Locale.ENGLISH));
assertFalse("File exists" + upper, fs.exists(upper));
assertFalse("File exists" + lower, fs.exists(lower));
FSDataOutputStream out = fs.create(upper);
out.writeUTF("UPPER");
out.close();
FileStatus upperStatus = fs.getFileStatus(upper);
assertTrue("File does not exist" + upper, fs.exists(upper));
//verify the lower-case version of the filename doesn't exist
assertFalse("File exists" + lower, fs.exists(lower));
//now overwrite the lower case version of the filename with a
//new version.
out = fs.create(lower);
out.writeUTF("l");
out.close();
assertTrue("File does not exist" + lower, fs.exists(lower));
//verify the length of the upper file hasn't changed
FileStatus newStatus = fs.getFileStatus(upper);
assertEquals("Expected status:" + upperStatus
+ " actual status " + newStatus,
upperStatus.getLen(),
newStatus.getLen()); }
/**
* Asserts that a zero byte file has a status of file and not
* directory or symlink
* @throws Exception on failures
*/
public void testZeroByteFilesAreFiles() throws Exception {
Path src = path("/test/testZeroByteFilesAreFiles");
//create a zero byte file
FSDataOutputStream out = fs.create(src);
out.close();
assertIsFile(src);
}
/**
* Asserts that a multi-byte file has a status of file and not
* directory or symlink
* @throws Exception on failures
*/
public void testMultiByteFilesAreFiles() throws Exception {
Path src = path("/test/testMultiByteFilesAreFiles");
FSDataOutputStream out = fs.create(src);
out.writeUTF("testMultiByteFilesAreFiles");
out.close();
assertIsFile(src);
}
/**
* Assert that the root directory always exists
* @throws Exception on failures
*/
public void testRootDirAlwaysExists() throws Exception {
//this will throw an exception if the path is not found
fs.getFileStatus(path("/"));
//this catches overrides of the base exists() method that don't
//use getFileStatus() as an existence probe
assertTrue("FileSystem.exists() fails for root", fs.exists(path("/")));
}
/**
* Assert that root directory renames are not allowed
* @throws Exception on failures
*/
public void testRenameRootDirForbidden() throws Exception {
if (!renameSupported()) return;
rename(path("/"),
path("/test/newRootDir"),
false, true, false);
}
/**
* Assert that renaming a parent directory to be a child
* of itself is forbidden
* @throws Exception on failures
*/
public void testRenameChildDirForbidden() throws Exception {
if (!renameSupported()) return;
LOG.info("testRenameChildDirForbidden");
Path parentdir = path("/test/parentdir");
fs.mkdirs(parentdir);
Path childFile = new Path(parentdir, "childfile");
createFile(childFile);
//verify one level down
Path childdir = new Path(parentdir, "childdir");
rename(parentdir, childdir, false, true, false);
//now another level
fs.mkdirs(childdir);
Path childchilddir = new Path(childdir, "childdir");
rename(parentdir, childchilddir, false, true, false);
}
/**
* This is a sanity check to make sure that any filesystem's handling of
* renames doesn't cause any regressions
*/
public void testRenameToDirWithSamePrefixAllowed() throws Throwable {
if (!renameSupported()) return;
Path parentdir = path("test/parentdir");
fs.mkdirs(parentdir);
Path dest = path("test/parentdirdest");
rename(parentdir, dest, true, false, true);
}
/**
* trying to rename a directory onto itself should fail,
* preserving everything underneath.
*/
public void testRenameDirToSelf() throws Throwable {
if (!renameSupported()) {
return;
}
Path parentdir = path("test/parentdir");
fs.mkdirs(parentdir);
Path child = new Path(parentdir, "child");
createFile(child);
rename(parentdir, parentdir, false, true, true);
//verify the child is still there
assertIsFile(child);
}
/**
* trying to rename a directory onto its parent dir will build
* a destination path of its original name, which should then fail.
* The source path and the destination path should still exist afterwards
*/
public void testMoveDirUnderParent() throws Throwable {
if (!renameSupported()) {
return;
}
Path testdir = path("test/dir");
fs.mkdirs(testdir);
Path parent = testdir.getParent();
//the outcome here is ambiguous, so is not checked
fs.rename(testdir, parent);
assertEquals("Source exists: " + testdir, true, fs.exists(testdir));
assertEquals("Destination exists" + parent, true, fs.exists(parent));
}
/**
* trying to rename a file onto itself should succeed (it's a no-op)
*
*/
public void testRenameFileToSelf() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
//verify the file is still there
assertIsFile(filepath);
}
/**
* trying to move a file into its parent dir should succeed
* again: no-op
*/
public void testMoveFileUnderParent() throws Throwable {
if (!renameSupported()) return;
Path filepath = path("test/file");
createFile(filepath);
//HDFS expects rename src, src -> true
rename(filepath, filepath, true, true, true);
//verify the file is still there
assertIsFile(filepath);
}
public void testLSRootDir() throws Throwable {
Path dir = path("/");
Path child = path("/test");
createFile(child);
assertListFilesFinds(dir, child);
}
public void testListStatusRootDir() throws Throwable {
Path dir = path("/");
Path child = path("/test");
createFile(child);
assertListStatusFinds(dir, child);
}
private void assertListFilesFinds(Path dir, Path subdir) throws IOException {
RemoteIterator<LocatedFileStatus> iterator =
fs.listFiles(dir, true);
boolean found = false;
StringBuilder builder = new StringBuilder();
while (iterator.hasNext()) {
LocatedFileStatus next = iterator.next();
builder.append(next.toString()).append('\n');
if (next.getPath().equals(subdir)) {
found = true;
}
}
assertTrue("Path " + subdir
+ " not found in directory " + dir + ":" + builder,
found);
}
private void assertListStatusFinds(Path dir, Path subdir) throws IOException {
FileStatus[] stats = fs.listStatus(dir);
boolean found = false;
StringBuilder builder = new StringBuilder();
for (FileStatus stat : stats) {
builder.append(stat.toString()).append('\n');
if (stat.getPath().equals(subdir)) {
found = true;
}
}
assertTrue("Path " + subdir
+ " not found in directory " + dir + ":" + builder,
found);
}
/**
* Assert that a file exists and whose {@link FileStatus} entry
* declares that this is a file and not a symlink or directory.
* @param filename name of the file
* @throws IOException IO problems during file operations
*/
private void assertIsFile(Path filename) throws IOException {
assertTrue("Does not exist: " + filename, fs.exists(filename));
FileStatus status = fs.getFileStatus(filename);
String fileInfo = filename + " " + status;
assertTrue("Not a file " + fileInfo, status.isFile());
assertFalse("File claims to be a symlink " + fileInfo,
status.isSymlink());
assertFalse("File claims to be a directory " + fileInfo,
status.isDirectory());
}
/** /**
* *
* Write a file and read it in, validating the result. Optional flags control * Write a file and read it in, validating the result. Optional flags control

View File

@ -26,6 +26,7 @@ import java.net.URI;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.NetUtilsTestResolver; import org.apache.hadoop.security.NetUtilsTestResolver;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
import org.junit.BeforeClass; import org.junit.BeforeClass;
@ -312,6 +313,11 @@ public class TestFileSystemCanonicalization {
return defaultPort; return defaultPort;
} }
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
@Override @Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException { public FSDataInputStream open(Path f, int bufferSize) throws IOException {
throw new IOException("not supposed to be here"); throw new IOException("not supposed to be here");

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileSystem.Statistics;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
import static org.apache.hadoop.fs.FileSystemTestHelper.*; import static org.apache.hadoop.fs.FileSystemTestHelper.*;
@ -266,9 +267,14 @@ public class TestLocalFileSystem {
LocalFileSystem fs = FileSystem.getLocal(conf); LocalFileSystem fs = FileSystem.getLocal(conf);
Path path = new Path(TEST_ROOT_DIR, "test-file"); Path path = new Path(TEST_ROOT_DIR, "test-file");
writeFile(fs, path, 1); writeFile(fs, path, 1);
BufferedFSInputStream bis = new BufferedFSInputStream( BufferedFSInputStream bis = null;
new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024); try {
bis = new BufferedFSInputStream(new RawLocalFileSystem()
.new LocalFSFileInputStream(path), 1024);
assertNotNull(bis.getFileDescriptor()); assertNotNull(bis.getFileDescriptor());
} finally {
IOUtils.cleanup(null, bis);
}
} }
@Test @Test

View File

@ -50,6 +50,7 @@ class InMemoryFileSystemStore implements FileSystemStore {
@Override @Override
public void initialize(URI uri, Configuration conf) { public void initialize(URI uri, Configuration conf) {
this.conf = conf; this.conf = conf;
inodes.put(new Path("/"), INode.DIRECTORY_INODE);
} }
@Override @Override

@ -51,7 +51,7 @@ public abstract class NativeS3FileSystemContractBaseTest
public void testListStatusForRoot() throws Exception { public void testListStatusForRoot() throws Exception {
FileStatus[] paths = fs.listStatus(path("/")); FileStatus[] paths = fs.listStatus(path("/"));
assertEquals(0, paths.length); assertEquals("Root directory is not empty; ", 0, paths.length);
Path testDir = path("/test"); Path testDir = path("/test");
assertTrue(fs.mkdirs(testDir)); assertTrue(fs.mkdirs(testDir));

@ -25,6 +25,7 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.ipc.Server.Connection;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.net.ConnectTimeoutException;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
@ -520,11 +521,53 @@ public class TestIPC {
} }
} }
/**
* Check that the service class byte in the IPC header is correct on the wire.
*/
@Test(timeout=60000)
public void testIpcWithServiceClass() throws Exception {
// start server
Server server = new TestServer(5, false);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
server.start();
// start client
Client.setConnectTimeout(conf, 10000);
callAndVerify(server, addr, 0, true);
// The service class is sent as a single byte, so the lowest valid value is -128.
// -128 should arrive unchanged on the wire, but -129 should not.
callAndVerify(server, addr, -128, true);
callAndVerify(server, addr, -129, false);
// The highest valid value is 127.
// 127 should arrive unchanged on the wire, but 128 should not.
callAndVerify(server, addr, 127, true);
callAndVerify(server, addr, 128, false);
server.stop();
}
/**
* Make a call from a client and verify whether the service class recorded on the
* server side matches the one sent; noChanged indicates the expected outcome.
*/
private void callAndVerify(Server server, InetSocketAddress addr,
int serviceClass, boolean noChanged) throws Exception{
Client client = new Client(LongWritable.class, conf);
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME, serviceClass, conf);
Connection connection = server.getConnections().get(0);
int serviceClass2 = connection.getServiceClass();
assertFalse(noChanged ^ serviceClass == serviceClass2);
client.stop();
}
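The valid range exercised above comes straight from Java's narrowing conversion to byte, which is what the wire format applies; a quick illustration (plain Java, not part of the patch):

byte a = (byte) 127;    // 127  - unchanged
byte b = (byte) -128;   // -128 - unchanged
byte c = (byte) 128;    // -128 - altered, so callAndVerify expects a mismatch
byte d = (byte) -129;   // 127  - altered as well

The assertFalse(noChanged ^ serviceClass == serviceClass2) check then passes exactly when the caller's expectation about the value surviving the trip matches what the server recorded.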
/** /**
* Check that file descriptors aren't leaked by starting * Check that file descriptors aren't leaked by starting
* and stopping IPC servers. * and stopping IPC servers.
*/ */
@Test @Test(timeout=60000)
public void testSocketLeak() throws Exception { public void testSocketLeak() throws Exception {
Assume.assumeTrue(FD_DIR.exists()); // only run on Linux Assume.assumeTrue(FD_DIR.exists()); // only run on Linux

@ -26,6 +26,7 @@ import java.io.Writer;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import javax.naming.CommunicationException;
import javax.naming.NamingEnumeration; import javax.naming.NamingEnumeration;
import javax.naming.NamingException; import javax.naming.NamingException;
import javax.naming.directory.Attribute; import javax.naming.directory.Attribute;
@ -46,21 +47,15 @@ public class TestLdapGroupsMapping {
private DirContext mockContext; private DirContext mockContext;
private LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping()); private LdapGroupsMapping mappingSpy = spy(new LdapGroupsMapping());
private NamingEnumeration mockUserNamingEnum = mock(NamingEnumeration.class);
private NamingEnumeration mockGroupNamingEnum = mock(NamingEnumeration.class);
private String[] testGroups = new String[] {"group1", "group2"};
@Before @Before
public void setupMocks() throws NamingException { public void setupMocks() throws NamingException {
mockContext = mock(DirContext.class); mockContext = mock(DirContext.class);
doReturn(mockContext).when(mappingSpy).getDirContext(); doReturn(mockContext).when(mappingSpy).getDirContext();
NamingEnumeration mockUserNamingEnum = mock(NamingEnumeration.class);
NamingEnumeration mockGroupNamingEnum = mock(NamingEnumeration.class);
// The search functionality of the mock context is reused, so we will
// return the user NamingEnumeration first, and then the group
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
SearchResult mockUserResult = mock(SearchResult.class); SearchResult mockUserResult = mock(SearchResult.class);
// We only ever call hasMoreElements once for the user NamingEnum, so // We only ever call hasMoreElements once for the user NamingEnum, so
// we can just have one return value // we can just have one return value
@ -76,23 +71,57 @@ public class TestLdapGroupsMapping {
// Define the attribute for the name of the first group // Define the attribute for the name of the first group
Attribute group1Attr = new BasicAttribute("cn"); Attribute group1Attr = new BasicAttribute("cn");
group1Attr.add("group1"); group1Attr.add(testGroups[0]);
Attributes group1Attrs = new BasicAttributes(); Attributes group1Attrs = new BasicAttributes();
group1Attrs.put(group1Attr); group1Attrs.put(group1Attr);
// Define the attribute for the name of the second group // Define the attribute for the name of the second group
Attribute group2Attr = new BasicAttribute("cn"); Attribute group2Attr = new BasicAttribute("cn");
group2Attr.add("group2"); group2Attr.add(testGroups[1]);
Attributes group2Attrs = new BasicAttributes(); Attributes group2Attrs = new BasicAttributes();
group2Attrs.put(group2Attr); group2Attrs.put(group2Attr);
// This search result gets reused, so return group1, then group2 // This search result gets reused, so return group1, then group2
when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs); when(mockGroupResult.getAttributes()).thenReturn(group1Attrs, group2Attrs);
} }
@Test @Test
public void testGetGroups() throws IOException, NamingException { public void testGetGroups() throws IOException, NamingException {
// The search functionality of the mock context is reused, so we will
// return the user NamingEnumeration first, and then the group
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
doTestGetGroups(Arrays.asList(testGroups), 2);
}
@Test
public void testGetGroupsWithConnectionClosed() throws IOException, NamingException {
// This case mocks a connection that has been closed or garbage-collected: the first
// search call throws CommunicationException, then after reconnecting the user
// NamingEnumeration is returned first, and then the group one
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenThrow(new CommunicationException("Connection is closed"))
.thenReturn(mockUserNamingEnum, mockGroupNamingEnum);
// Even though the connection went down, the groups should still be retrieved after reconnecting
doTestGetGroups(Arrays.asList(testGroups), 1 + 2); // 1 is the first failure call
}
@Test
public void testGetGroupsWithLdapDown() throws IOException, NamingException {
// This mocks the case where the LDAP server is down and every search throws CommunicationException
when(mockContext.search(anyString(), anyString(), any(Object[].class),
any(SearchControls.class)))
.thenThrow(new CommunicationException("Connection is closed"));
// Ldap server is down, no groups should be retrieved
doTestGetGroups(Arrays.asList(new String[] {}),
1 + LdapGroupsMapping.RECONNECT_RETRY_COUNT); // 1 is the first normal call
}
private void doTestGetGroups(List<String> expectedGroups, int searchTimes) throws IOException, NamingException {
Configuration conf = new Configuration(); Configuration conf = new Configuration();
// Set this, so we don't throw an exception // Set this, so we don't throw an exception
conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test"); conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://test");
@ -102,10 +131,10 @@ public class TestLdapGroupsMapping {
// regardless of input // regardless of input
List<String> groups = mappingSpy.getGroups("some_user"); List<String> groups = mappingSpy.getGroups("some_user");
Assert.assertEquals(Arrays.asList("group1", "group2"), groups); Assert.assertEquals(expectedGroups, groups);
// We should have searched for a user, and then two groups // We should have searched for a user, and then two groups
verify(mockContext, times(2)).search(anyString(), verify(mockContext, times(searchTimes)).search(anyString(),
anyString(), anyString(),
any(Object[].class), any(Object[].class),
any(SearchControls.class)); any(SearchControls.class));
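All three tests drive the same retry path: a CommunicationException triggers a reconnect and another attempt, up to RECONNECT_RETRY_COUNT extra tries. A rough sketch of that control flow, with doSearch() and reconnect() as hypothetical stand-ins for the real DirContext handling:

List<String> groups = new ArrayList<String>();
for (int attempt = 0; attempt <= LdapGroupsMapping.RECONNECT_RETRY_COUNT; attempt++) {
  try {
    groups = doSearch(user);   // hypothetical helper issuing the user and group searches
    break;                     // success, stop retrying
  } catch (CommunicationException e) {
    reconnect();               // hypothetical helper rebuilding the directory context
  }
}
// If the server never comes back, the loop exits with an empty list after
// 1 + RECONNECT_RETRY_COUNT search calls, which is what testGetGroupsWithLdapDown verifies.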

@ -20,6 +20,7 @@ package org.apache.hadoop.test;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.StringWriter; import java.io.StringWriter;
import java.lang.reflect.InvocationTargetException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Random; import java.util.Random;
import java.util.Set; import java.util.Set;
@ -266,15 +267,29 @@ public abstract class GenericTestUtils {
*/ */
public static class DelegateAnswer implements Answer<Object> { public static class DelegateAnswer implements Answer<Object> {
private final Object delegate; private final Object delegate;
private final Log log;
public DelegateAnswer(Object delegate) { public DelegateAnswer(Object delegate) {
this(null, delegate);
}
public DelegateAnswer(Log log, Object delegate) {
this.log = log;
this.delegate = delegate; this.delegate = delegate;
} }
@Override @Override
public Object answer(InvocationOnMock invocation) throws Throwable { public Object answer(InvocationOnMock invocation) throws Throwable {
try {
if (log != null) {
log.info("Call to " + invocation + " on " + delegate,
new Exception("TRACE"));
}
return invocation.getMethod().invoke( return invocation.getMethod().invoke(
delegate, invocation.getArguments()); delegate, invocation.getArguments());
} catch (InvocationTargetException ite) {
throw ite.getCause();
}
} }
} }
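As a usage sketch (not part of the patch), the new logging constructor lets a test wrap any real object in a Mockito mock that records each call, with a stack trace, before forwarding it to the delegate:

Log log = LogFactory.getLog("DelegateSpy");
List<String> real = new ArrayList<String>();
@SuppressWarnings("unchecked")
List<String> spy = Mockito.mock(List.class,
    new GenericTestUtils.DelegateAnswer(log, real));
spy.add("hello");               // logged, then delegated to the real list
assertEquals(1, real.size());   // the delegate actually received the call

The TestDFSClientFailover change below uses exactly this pattern to spy on the JDK name service.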

@ -181,6 +181,9 @@ Trunk (Unreleased)
HDFS-4346. Add SequentialNumber as a base class for INodeId and HDFS-4346. Add SequentialNumber as a base class for INodeId and
GenerationStamp. (szetszwo) GenerationStamp. (szetszwo)
HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
cache expires too quickly (Chris Nauroth via Sanjay)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -369,6 +372,12 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4246. The exclude node list should be more forgiving, for each output HDFS-4246. The exclude node list should be more forgiving, for each output
stream. (harsh via atm) stream. (harsh via atm)
HDFS-4635. Move BlockManager#computeCapacity to LightWeightGSet. (suresh)
HDFS-4621. Additional logging to help diagnose slow QJM syncs. (todd)
HDFS-4618. Default transaction interval for checkpoints is too low. (todd)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -448,6 +457,9 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4584. Skip TestNNWithQJM.testNewNamenodeTakesOverWriter() on Windows. HDFS-4584. Skip TestNNWithQJM.testNewNamenodeTakesOverWriter() on Windows.
(Arpit Agarwal via szetszwo) (Arpit Agarwal via szetszwo)
HDFS-4598. Fix the default value of ConcatSourcesParam and the WebHDFS doc.
(szetszwo)
Release 2.0.4-alpha - UNRELEASED Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -2462,6 +2474,9 @@ Release 0.23.7 - UNRELEASED
HDFS-3367. WebHDFS doesn't use the logged in user when opening HDFS-3367. WebHDFS doesn't use the logged in user when opening
connections (daryn) connections (daryn)
HDFS-4581. checkDiskError should not be called on network errors (Rohit
Kochar via kihwal)
Release 0.23.6 - UNRELEASED Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

@ -122,7 +122,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period"; public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600; public static final long DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns"; public static final String DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
public static final long DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000; public static final long DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 1000000;
public static final String DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY = "dfs.namenode.checkpoint.max-retries"; public static final String DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY = "dfs.namenode.checkpoint.max-retries";
public static final int DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT = 3; public static final int DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT = 3;
public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval"; public static final String DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";

@ -65,6 +65,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
@ -315,13 +316,14 @@ public class DistributedFileSystem extends FileSystem {
} }
/** /**
* Move blocks from srcs to trg * Move blocks from srcs to trg and delete srcs afterwards.
* and delete srcs afterwards * The file block sizes must be the same.
* RESTRICTION: all blocks should be the same size *
* @param trg existing file to append to * @param trg existing file to append to
* @param psrcs list of files (same block size, same replication) * @param psrcs list of files (same block size, same replication)
* @throws IOException * @throws IOException
*/ */
@Override
public void concat(Path trg, Path [] psrcs) throws IOException { public void concat(Path trg, Path [] psrcs) throws IOException {
String [] srcs = new String [psrcs.length]; String [] srcs = new String [psrcs.length];
for(int i=0; i<psrcs.length; i++) { for(int i=0; i<psrcs.length; i++) {
@ -896,6 +898,17 @@ public class DistributedFileSystem extends FileSystem {
return dfs.getCanonicalServiceName(); return dfs.getCanonicalServiceName();
} }
@Override
protected URI canonicalizeUri(URI uri) {
if (HAUtil.isLogicalUri(getConf(), uri)) {
// Don't try to DNS-resolve logical URIs, since the 'authority'
// portion isn't a proper hostname
return uri;
} else {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
}
/** /**
* Utility function that returns if the NameNode is in safemode or not. In HA * Utility function that returns if the NameNode is in safemode or not. In HA
* mode, this API will return only ActiveNN's safemode status. * mode, this API will return only ActiveNN's safemode status.

@ -162,6 +162,11 @@ public class HftpFileSystem extends FileSystem
return SecurityUtil.buildTokenService(nnSecureUri).toString(); return SecurityUtil.buildTokenService(nnSecureUri).toString();
} }
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
/** /**
* Return the protocol scheme for the FileSystem. * Return the protocol scheme for the FileSystem.
* <p/> * <p/>

@ -134,6 +134,8 @@ public class IPCLoggerChannel implements AsyncLogger {
private static final long HEARTBEAT_INTERVAL_MILLIS = 1000; private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
private static final long WARN_JOURNAL_MILLIS_THRESHOLD = 1000;
static final Factory FACTORY = new AsyncLogger.Factory() { static final Factory FACTORY = new AsyncLogger.Factory() {
@Override @Override
public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo, public AsyncLogger createLogger(Configuration conf, NamespaceInfo nsInfo,
@ -371,6 +373,12 @@ public class IPCLoggerChannel implements AsyncLogger {
now - submitNanos, TimeUnit.NANOSECONDS); now - submitNanos, TimeUnit.NANOSECONDS);
metrics.addWriteEndToEndLatency(endToEndTime); metrics.addWriteEndToEndLatency(endToEndTime);
metrics.addWriteRpcLatency(rpcTime); metrics.addWriteRpcLatency(rpcTime);
if (rpcTime / 1000 > WARN_JOURNAL_MILLIS_THRESHOLD) {
QuorumJournalManager.LOG.warn(
"Took " + (rpcTime / 1000) + "ms to send a batch of " +
numTxns + " edits (" + data.length + " bytes) to " +
"remote journal " + IPCLoggerChannel.this);
}
} }
synchronized (IPCLoggerChannel.this) { synchronized (IPCLoggerChannel.this) {
highestAckedTxId = firstTxnId + numTxns - 1; highestAckedTxId = firstTxnId + numTxns - 1;

@ -24,6 +24,7 @@ import java.util.concurrent.TimeoutException;
import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions; import com.google.common.base.Preconditions;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import com.google.common.util.concurrent.FutureCallback; import com.google.common.util.concurrent.FutureCallback;
@ -120,6 +121,15 @@ class QuorumCall<KEY, RESULT> {
String msg = String.format( String msg = String.format(
"Waited %s ms (timeout=%s ms) for a response for %s", "Waited %s ms (timeout=%s ms) for a response for %s",
waited, millis, operationName); waited, millis, operationName);
if (!successes.isEmpty()) {
msg += ". Succeeded so far: [" + Joiner.on(",").join(successes.keySet()) + "]";
}
if (!exceptions.isEmpty()) {
msg += ". Exceptions so far: [" + getExceptionMapString() + "]";
}
if (successes.isEmpty() && exceptions.isEmpty()) {
msg += ". No responses yet.";
}
if (waited > millis * WAIT_PROGRESS_WARN_THRESHOLD) { if (waited > millis * WAIT_PROGRESS_WARN_THRESHOLD) {
QuorumJournalManager.LOG.warn(msg); QuorumJournalManager.LOG.warn(msg);
} else { } else {
@ -227,4 +237,22 @@ class QuorumCall<KEY, RESULT> {
} }
return sb.toString(); return sb.toString();
} }
/**
* Return a string suitable for displaying to the user, containing
* any exceptions that have been received so far.
*/
private String getExceptionMapString() {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (Map.Entry<KEY, Throwable> e : exceptions.entrySet()) {
if (!first) {
sb.append(", ");
}
first = false;
sb.append(e.getKey()).append(": ")
.append(e.getValue().getLocalizedMessage());
}
return sb.toString();
}
} }
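For illustration only (the operation name, addresses, and timings are invented), a stalled call would now report something like:

Waited 18042 ms (timeout=20000 ms) for a response for sendEdits. Succeeded so far: [127.0.0.1:8485]. Exceptions so far: [127.0.0.2:8485: Connection refused]

which makes it easier to tell a journal node that is merely slow from one that is unreachable.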

@ -128,6 +128,10 @@ class Journal implements Closeable {
private final JournalMetrics metrics; private final JournalMetrics metrics;
/**
* Time threshold for sync calls, beyond which a warning should be logged to the console.
*/
private static final int WARN_SYNC_MILLIS_THRESHOLD = 1000;
Journal(File logDir, String journalId, Journal(File logDir, String journalId,
StorageErrorReporter errorReporter) throws IOException { StorageErrorReporter errorReporter) throws IOException {
@ -370,6 +374,10 @@ class Journal implements Closeable {
sw.stop(); sw.stop();
metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS)); metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
" took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
}
if (isLagging) { if (isLagging) {
// This batch of edits has already been committed on a quorum of other // This batch of edits has already been committed on a quorum of other

@ -235,6 +235,7 @@ public class BlockManager {
heartbeatManager = datanodeManager.getHeartbeatManager(); heartbeatManager = datanodeManager.getHeartbeatManager();
invalidateBlocks = new InvalidateBlocks(datanodeManager); invalidateBlocks = new InvalidateBlocks(datanodeManager);
// Compute the map capacity by allocating 2% of total memory
blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR); blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR);
blockplacement = BlockPlacementPolicy.getInstance( blockplacement = BlockPlacementPolicy.getInstance(
conf, stats, datanodeManager.getNetworkTopology()); conf, stats, datanodeManager.getNetworkTopology());

@ -60,38 +60,11 @@ class BlocksMap {
private GSet<Block, BlockInfo> blocks; private GSet<Block, BlockInfo> blocks;
BlocksMap(final float loadFactor) { BlocksMap(final float loadFactor) {
this.capacity = computeCapacity(); // Use 2% of total memory to size the GSet capacity
this.capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap");
this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity); this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity);
} }
/**
* Let t = 2% of max memory.
* Let e = round(log_2 t).
* Then, we choose capacity = 2^e/(size of reference),
* unless it is outside the close interval [1, 2^30].
*/
private static int computeCapacity() {
//VM detection
//See http://java.sun.com/docs/hotspot/HotSpotFAQ.html#64bit_detection
final String vmBit = System.getProperty("sun.arch.data.model");
//2% of max memory
final double twoPC = Runtime.getRuntime().maxMemory()/50.0;
//compute capacity
final int e1 = (int)(Math.log(twoPC)/Math.log(2.0) + 0.5);
final int e2 = e1 - ("32".equals(vmBit)? 2: 3);
final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
final int c = 1 << exponent;
if (LightWeightGSet.LOG.isDebugEnabled()) {
LightWeightGSet.LOG.debug("VM type = " + vmBit + "-bit");
LightWeightGSet.LOG.debug("2% max memory = " + twoPC/(1 << 20) + " MB");
LightWeightGSet.LOG.debug("capacity = 2^" + exponent
+ " = " + c + " entries");
}
return c;
}
void close() { void close() {
// Empty blocks once GSet#clear is implemented (HDFS-3940) // Empty blocks once GSet#clear is implemented (HDFS-3940)

@ -60,8 +60,11 @@ import java.io.PrintStream;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.ServerSocket; import java.net.ServerSocket;
import java.net.Socket; import java.net.Socket;
import java.net.SocketException;
import java.net.SocketTimeoutException;
import java.net.URI; import java.net.URI;
import java.net.UnknownHostException; import java.net.UnknownHostException;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.ServerSocketChannel; import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel; import java.nio.channels.SocketChannel;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
@ -1172,7 +1175,13 @@ public class DataNode extends Configured
protected void checkDiskError(Exception e ) throws IOException { protected void checkDiskError(Exception e ) throws IOException {
LOG.warn("checkDiskError: exception: ", e); LOG.warn("checkDiskError: exception: ", e);
if (e instanceof SocketException || e instanceof SocketTimeoutException
|| e instanceof ClosedByInterruptException
|| e.getMessage().startsWith("Broken pipe")) {
LOG.info("Not checking disk as checkDiskError was called on a network" +
" related exception");
return;
}
if (e.getMessage() != null && if (e.getMessage() != null &&
e.getMessage().startsWith("No space left on device")) { e.getMessage().startsWith("No space left on device")) {
throw new DiskOutOfSpaceException("No space left on device"); throw new DiskOutOfSpaceException("No space left on device");
@ -1485,7 +1494,11 @@ public class DataNode extends Configured
LOG.warn(bpReg + ":Failed to transfer " + b + " to " + LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
targets[0] + " got ", ie); targets[0] + " got ", ie);
// check if there are any disk problem // check if there are any disk problem
checkDiskError(); try{
checkDiskError(ie);
} catch(IOException e) {
LOG.warn("DataNode.checkDiskError failed in run() with: ", e);
}
} finally { } finally {
xmitsInProgress.getAndDecrement(); xmitsInProgress.getAndDecrement();

@ -24,8 +24,11 @@ import java.util.Iterator;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import com.google.common.annotations.VisibleForTesting;
/** /**
* A low memory footprint {@link GSet} implementation, * A low memory footprint {@link GSet} implementation,
* which uses an array for storing the elements * which uses an array for storing the elements
@ -285,4 +288,54 @@ public class LightWeightGSet<K, E extends K> implements GSet<K, E> {
throw new UnsupportedOperationException("Remove is not supported."); throw new UnsupportedOperationException("Remove is not supported.");
} }
} }
/**
* Let t = percentage of max memory.
* Let e = round(log_2 t).
* Then, we choose capacity = 2^e/(size of reference),
* unless it is outside the close interval [1, 2^30].
*/
public static int computeCapacity(double percentage, String mapName) {
return computeCapacity(Runtime.getRuntime().maxMemory(), percentage,
mapName);
}
@VisibleForTesting
static int computeCapacity(long maxMemory, double percentage,
String mapName) {
if (percentage > 100.0 || percentage < 0.0) {
throw new HadoopIllegalArgumentException("Percentage " + percentage
+ " must be greater than or equal to 0"
+ " and less than or equal to 100");
}
if (maxMemory < 0) {
throw new HadoopIllegalArgumentException("Memory " + maxMemory
+ " must be greater than or equal to 0");
}
if (percentage == 0.0 || maxMemory == 0) {
return 0;
}
//VM detection
//See http://java.sun.com/docs/hotspot/HotSpotFAQ.html#64bit_detection
final String vmBit = System.getProperty("sun.arch.data.model");
//Percentage of max memory
final double percentDivisor = 100.0/percentage;
final double percentMemory = maxMemory/percentDivisor;
//compute capacity
final int e1 = (int)(Math.log(percentMemory)/Math.log(2.0) + 0.5);
final int e2 = e1 - ("32".equals(vmBit)? 2: 3);
final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
final int c = 1 << exponent;
if (LightWeightGSet.LOG.isDebugEnabled()) {
LOG.debug("Computing capacity for map " + mapName);
LOG.debug("VM type = " + vmBit + "-bit");
LOG.debug(percentage + "% max memory = "
+ StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1));
LOG.debug("capacity = 2^" + exponent + " = " + c + " entries");
}
return c;
}
} }
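As a worked example of the formula above (numbers illustrative, 64-bit JVM with 8-byte references assumed):

// maxMemory = 4 GiB, percentage = 2.0
//   percentMemory = 4 GiB / 50       ~ 85.9 MB
//   e1 = round(log2(85.9e6))         = 26
//   e2 = e1 - 3 (64-bit data model)  = 23
//   capacity = 2^23                  = 8,388,608 entries
int capacity = LightWeightGSet.computeCapacity(2.0, "BlocksMap");

which is the call BlocksMap now makes instead of its private computeCapacity().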

@ -30,7 +30,6 @@ import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.net.URL; import java.net.URL;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -64,7 +63,33 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException; import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.*; import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryUtils; import org.apache.hadoop.io.retry.RetryUtils;
@ -82,7 +107,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.mortbay.util.ajax.JSON; import org.mortbay.util.ajax.JSON;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
@ -215,6 +239,11 @@ public class WebHdfsFileSystem extends FileSystem
return this.uri; return this.uri;
} }
@Override
protected URI canonicalizeUri(URI uri) {
return NetUtils.getCanonicalUri(uri, getDefaultPort());
}
/** @return the home directory. */ /** @return the home directory. */
public static String getHomeDirectoryString(final UserGroupInformation ugi) { public static String getHomeDirectoryString(final UserGroupInformation ugi) {
return "/user/" + ugi.getShortUserName(); return "/user/" + ugi.getShortUserName();
@ -729,17 +758,10 @@ public class WebHdfsFileSystem extends FileSystem
} }
@Override @Override
public void concat(final Path trg, final Path [] psrcs) throws IOException { public void concat(final Path trg, final Path [] srcs) throws IOException {
statistics.incrementWriteOps(1); statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PostOpParam.Op.CONCAT; final HttpOpParam.Op op = PostOpParam.Op.CONCAT;
List<String> strPaths = new ArrayList<String>(psrcs.length);
for(Path psrc : psrcs) {
strPaths.add(psrc.toUri().getPath());
}
String srcs = StringUtils.join(",", strPaths);
ConcatSourcesParam param = new ConcatSourcesParam(srcs); ConcatSourcesParam param = new ConcatSourcesParam(srcs);
run(op, trg, param); run(op, trg, param);
} }

@ -18,15 +18,28 @@
package org.apache.hadoop.hdfs.web.resources; package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.fs.Path;
/** The concat source paths parameter. */ /** The concat source paths parameter. */
public class ConcatSourcesParam extends StringParam { public class ConcatSourcesParam extends StringParam {
/** Parameter name. */ /** Parameter name. */
public static final String NAME = "sources"; public static final String NAME = "sources";
public static final String DEFAULT = NULL; public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null); private static final Domain DOMAIN = new Domain(NAME, null);
private static String paths2String(Path[] paths) {
if (paths == null || paths.length == 0) {
return "";
}
final StringBuilder b = new StringBuilder(paths[0].toUri().getPath());
for(int i = 1; i < paths.length; i++) {
b.append(',').append(paths[i].toUri().getPath());
}
return b.toString();
}
/** /**
* Constructor. * Constructor.
* @param str a string representation of the parameter value. * @param str a string representation of the parameter value.
@ -35,6 +48,10 @@ public class ConcatSourcesParam extends StringParam {
super(DOMAIN, str); super(DOMAIN, str);
} }
public ConcatSourcesParam(Path[] paths) {
this(paths2String(paths));
}
@Override @Override
public String getName() { public String getName() {
return NAME; return NAME;
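A small illustration of the new Path[] constructor (paths chosen arbitrarily):

Path[] srcs = { new Path("/test/file1"), new Path("/test/file2") };
ConcatSourcesParam param = new ConcatSourcesParam(srcs);
// param.getValue() is "/test/file1,/test/file2" -- the comma-separated URI paths
// that WebHdfsFileSystem.concat(Path, Path[]) now builds directly from its arguments.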

@ -635,7 +635,7 @@
<property> <property>
<name>dfs.namenode.checkpoint.txns</name> <name>dfs.namenode.checkpoint.txns</name>
<value>40000</value> <value>1000000</value>
<description>The Secondary NameNode or CheckpointNode will create a checkpoint <description>The Secondary NameNode or CheckpointNode will create a checkpoint
of the namespace every 'dfs.namenode.checkpoint.txns' transactions, regardless of the namespace every 'dfs.namenode.checkpoint.txns' transactions, regardless
of whether 'dfs.namenode.checkpoint.period' has expired. of whether 'dfs.namenode.checkpoint.period' has expired.

@ -193,7 +193,7 @@ HDFS Users Guide
* <<<dfs.namenode.checkpoint.period>>>, set to 1 hour by default, specifies * <<<dfs.namenode.checkpoint.period>>>, set to 1 hour by default, specifies
the maximum delay between two consecutive checkpoints, and the maximum delay between two consecutive checkpoints, and
* <<<dfs.namenode.checkpoint.txns>>>, set to 40000 default, defines the * <<<dfs.namenode.checkpoint.txns>>>, set to 1 million by default, defines the
number of uncheckpointed transactions on the NameNode which will number of uncheckpointed transactions on the NameNode which will
force an urgent checkpoint, even if the checkpoint period has not force an urgent checkpoint, even if the checkpoint period has not
been reached. been reached.
@ -232,7 +232,7 @@ HDFS Users Guide
* <<<dfs.namenode.checkpoint.period>>>, set to 1 hour by default, specifies * <<<dfs.namenode.checkpoint.period>>>, set to 1 hour by default, specifies
the maximum delay between two consecutive checkpoints the maximum delay between two consecutive checkpoints
* <<<dfs.namenode.checkpoint.txns>>>, set to 40000 default, defines the * <<<dfs.namenode.checkpoint.txns>>>, set to 1 million by default, defines the
number of uncheckpointed transactions on the NameNode which will number of uncheckpointed transactions on the NameNode which will
force an urgent checkpoint, even if the checkpoint period has not force an urgent checkpoint, even if the checkpoint period has not
been reached. been reached.
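Deployments that prefer the old, more frequent trigger can still set the key explicitly; a minimal sketch using the configuration API (an equivalent hdfs-site.xml entry works the same way):

Configuration conf = new HdfsConfiguration();
// Restore the pre-HDFS-4618 default of a checkpoint every 40,000 transactions.
conf.setLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 40000L);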

@ -109,7 +109,7 @@ WebHDFS REST API
* {{{Append to a File}<<<APPEND>>>}} * {{{Append to a File}<<<APPEND>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append) (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
* {{{Concat File(s)}<<<CONCAT>>>}} * {{{Concatenate Files}<<<CONCAT>>>}}
(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat) (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
* HTTP DELETE * HTTP DELETE
@ -307,7 +307,7 @@ Content-Length: 0
* Submit a HTTP POST request. * Submit a HTTP POST request.
+--------------------------------- +---------------------------------
curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<SOURCES>" curl -i -X POST "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CONCAT&sources=<PATHS>"
+--------------------------------- +---------------------------------
The client receives a response with zero content length: The client receives a response with zero content length:
@ -319,10 +319,6 @@ Content-Length: 0
[] []
This REST API call is available as of Hadoop version 2.0.3.
Please note that <SOURCES> is a comma seperated list of absolute paths.
(Example: sources=/test/file1,/test/file2,/test/file3)
See also: See also:
{{{Sources}<<<sources>>>}}, {{{Sources}<<<sources>>>}},
{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat
@ -1761,7 +1757,7 @@ var tokenProperties =
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
|| Name | <<<sources>>> | || Name | <<<sources>>> |
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
|| Description | The comma seperated absolute paths used for concatenation. | || Description | A list of source paths. |
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
|| Type | String | || Type | String |
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
@ -1769,12 +1765,9 @@ var tokenProperties =
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
|| Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. | || Valid Values | A list of comma separated absolute FileSystem paths without scheme and authority. |
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
|| Syntax | See the note in {{Delegation}}. | || Syntax | Any string. |
*----------------+-------------------------------------------------------------------+ *----------------+-------------------------------------------------------------------+
<<Note>> that sources are absolute FileSystem paths.
See also: See also:
{{{Concat File(s)}<<<CONCAT>>>}} {{{Concat File(s)}<<<CONCAT>>>}}

@ -32,6 +32,8 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties; import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.util.ThreadUtil; import org.apache.hadoop.util.ThreadUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
@ -41,10 +43,25 @@ import org.junit.Test;
*/ */
public class TestDFSClientExcludedNodes { public class TestDFSClientExcludedNodes {
@Test(timeout=10000) private MiniDFSCluster cluster;
private Configuration conf;
@Before
public void setUp() {
cluster = null;
conf = new HdfsConfiguration();
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testExcludedNodes() throws IOException { public void testExcludedNodes() throws IOException {
Configuration conf = new HdfsConfiguration(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testExcludedNodes"); Path filePath = new Path("/testExcludedNodes");
@ -67,17 +84,16 @@ public class TestDFSClientExcludedNodes {
} }
} }
@Test(timeout=10000) @Test(timeout=60000)
public void testExcludedNodesForgiveness() throws IOException { public void testExcludedNodesForgiveness() throws IOException {
Configuration conf = new HdfsConfiguration(); // Forgive nodes in under 2.5s for this test case.
// Forgive nodes in under 1s for this test case.
conf.setLong( conf.setLong(
DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL, DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
1000); 2500);
// We'll be using a 512 bytes block size just for tests // We'll be using a 512 bytes block size just for tests
// so making sure the checksum bytes too match it. // so making sure the checksum bytes too match it.
conf.setInt("io.bytes.per.checksum", 512); conf.setInt("io.bytes.per.checksum", 512);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
List<DataNodeProperties> props = cluster.dataNodes; List<DataNodeProperties> props = cluster.dataNodes;
FileSystem fs = cluster.getFileSystem(); FileSystem fs = cluster.getFileSystem();
Path filePath = new Path("/testForgivingExcludedNodes"); Path filePath = new Path("/testForgivingExcludedNodes");
@ -112,11 +128,11 @@ public class TestDFSClientExcludedNodes {
Assert.assertEquals(true, cluster.restartDataNode(two, true)); Assert.assertEquals(true, cluster.restartDataNode(two, true));
cluster.waitActive(); cluster.waitActive();
// Sleep for 2s, to let the excluded nodes be expired // Sleep for 5s, to let the excluded nodes be expired
// from the excludes list (i.e. forgiven after the configured wait period). // from the excludes list (i.e. forgiven after the configured wait period).
// [Sleeping just in case the restart of the DNs completed < 2s cause // [Sleeping just in case the restart of the DNs completed < 5s cause
// otherwise, we'll end up quickly excluding those again.] // otherwise, we'll end up quickly excluding those again.]
ThreadUtil.sleepAtLeastIgnoreInterrupts(2000); ThreadUtil.sleepAtLeastIgnoreInterrupts(5000);
// Terminate the last good DN, to assert that there's no // Terminate the last good DN, to assert that there's no
// single-DN-available scenario, caused by not forgiving the other // single-DN-available scenario, caused by not forgiving the other

@ -26,8 +26,11 @@ import java.io.IOException;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.Socket; import java.net.Socket;
import java.net.SocketAddress; import java.net.SocketAddress;
import java.lang.reflect.Field;
import java.net.InetAddress;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.util.List;
import javax.net.SocketFactory; import javax.net.SocketFactory;
@ -35,6 +38,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode;
@ -48,10 +52,13 @@ import org.apache.hadoop.util.StringUtils;
import org.hamcrest.BaseMatcher; import org.hamcrest.BaseMatcher;
import org.hamcrest.Description; import org.hamcrest.Description;
import org.junit.After; import org.junit.After;
import org.junit.Assume;
import org.junit.Before; import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito; import org.mockito.Mockito;
import sun.net.spi.nameservice.NameService;
public class TestDFSClientFailover { public class TestDFSClientFailover {
private static final Log LOG = LogFactory.getLog(TestDFSClientFailover.class); private static final Log LOG = LogFactory.getLog(TestDFSClientFailover.class);
@ -201,4 +208,74 @@ public class TestDFSClientFailover {
"Could not find any configured addresses for URI " + uri)); "Could not find any configured addresses for URI " + uri));
} }
} }
/**
* Spy on the Java DNS infrastructure.
* This likely only works on Sun-derived JDKs, but uses JUnit's
* Assume functionality so that any tests using it are skipped on
* incompatible JDKs.
*/
private NameService spyOnNameService() {
try {
Field f = InetAddress.class.getDeclaredField("nameServices");
f.setAccessible(true);
Assume.assumeNotNull(f);
@SuppressWarnings("unchecked")
List<NameService> nsList = (List<NameService>) f.get(null);
NameService ns = nsList.get(0);
Log log = LogFactory.getLog("NameServiceSpy");
ns = Mockito.mock(NameService.class,
new GenericTestUtils.DelegateAnswer(log, ns));
nsList.set(0, ns);
return ns;
} catch (Throwable t) {
LOG.info("Unable to spy on DNS. Skipping test.", t);
// In case the JDK we're testing on doesn't work like Sun's, just
// skip the test.
Assume.assumeNoException(t);
throw new RuntimeException(t);
}
}
/**
* Test that the client doesn't ever try to DNS-resolve the logical URI.
* Regression test for HADOOP-9150.
*/
@Test
public void testDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Path qualifiedRoot = fs.makeQualified(new Path("/"));
// Make a few calls against the filesystem.
fs.getCanonicalServiceName();
fs.listStatus(qualifiedRoot);
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
/**
* Same test as above, but for FileContext.
*/
@Test
public void testFileContextDoesntDnsResolveLogicalURI() throws Exception {
NameService spyNS = spyOnNameService();
FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
String logicalHost = fs.getUri().getHost();
Configuration haClientConf = fs.getConf();
FileContext fc = FileContext.getFileContext(haClientConf);
Path root = new Path("/");
fc.listStatus(root);
fc.listStatus(fc.makeQualified(root));
fc.getDefaultFileSystem().getCanonicalServiceName();
// Ensure that the logical hostname was never resolved.
Mockito.verify(spyNS, Mockito.never()).lookupAllHostAddr(Mockito.eq(logicalHost));
}
} }

@ -21,6 +21,7 @@ import java.util.ConcurrentModificationException;
import java.util.Iterator; import java.util.Iterator;
import java.util.Random; import java.util.Random;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Time;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@ -452,4 +453,81 @@ public class TestGSet {
next = e; next = e;
} }
} }
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid percent less than 0.
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityNegativePercent() {
LightWeightGSet.computeCapacity(1024, -1.0, "testMap");
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid percent greater than 100.
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityInvalidPercent() {
LightWeightGSet.computeCapacity(1024, 101.0, "testMap");
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
* with invalid negative max memory
*/
@Test(expected=HadoopIllegalArgumentException.class)
public void testComputeCapacityInvalidMemory() {
LightWeightGSet.computeCapacity(-1, 50.0, "testMap");
}
private static boolean isPowerOfTwo(int num) {
return num == 0 || (num > 0 && Integer.bitCount(num) == 1);
}
/** Return capacity as percentage of total memory */
private static int getPercent(long total, int capacity) {
// Reference size in bytes
double referenceSize =
System.getProperty("sun.arch.data.model").equals("32") ? 4.0 : 8.0;
return (int)(((capacity * referenceSize)/total) * 100.0);
}
/** Return capacity as percentage of total memory */
private static void testCapacity(long maxMemory, double percent) {
int capacity = LightWeightGSet.computeCapacity(maxMemory, percent, "map");
LightWeightGSet.LOG.info("Validating - total memory " + maxMemory + " percent "
+ percent + " returned capacity " + capacity);
// Returned capacity is zero or power of two
Assert.assertTrue(isPowerOfTwo(capacity));
// Ensure the returned capacity is the nearest to the requested percentage
int capacityPercent = getPercent(maxMemory, capacity);
if (capacityPercent == percent) {
return;
} else if (capacityPercent > percent) {
Assert.assertTrue(getPercent(maxMemory, capacity * 2) > percent);
} else {
Assert.assertTrue(getPercent(maxMemory, capacity / 2) < percent);
}
}
/**
* Test for {@link LightWeightGSet#computeCapacity(double, String)}
*/
@Test
public void testComputeCapacity() {
// Tests for boundary conditions where percent or memory are zero
testCapacity(0, 0.0);
testCapacity(100, 0.0);
testCapacity(0, 100.0);
// Compute capacity for some 100 random max memory and percentage
Random r = new Random();
for (int i = 0; i < 100; i++) {
long maxMemory = r.nextInt(Integer.MAX_VALUE);
double percent = r.nextInt(101);
testCapacity(maxMemory, percent);
}
}
} }

@ -17,18 +17,22 @@
*/ */
package org.apache.hadoop.hdfs.web.resources; package org.apache.hadoop.hdfs.web.resources;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.util.Arrays;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
public class TestParam { public class TestParam {
public static final Log LOG = LogFactory.getLog(TestParam.class); public static final Log LOG = LogFactory.getLog(TestParam.class);
@ -265,4 +269,20 @@ public class TestParam {
UserParam userParam = new UserParam("a$"); UserParam userParam = new UserParam("a$");
assertNotNull(userParam.getValue()); assertNotNull(userParam.getValue());
} }
@Test
public void testConcatSourcesParam() {
final String[] strings = {"/", "/foo", "/bar"};
for(int n = 0; n < strings.length; n++) {
final String[] sub = new String[n];
final Path[] paths = new Path[n];
for(int i = 0; i < paths.length; i++) {
paths[i] = new Path(sub[i] = strings[i]);
}
final String expected = StringUtils.join(",", Arrays.asList(sub));
final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
Assert.assertEquals(expected, computed.getValue());
}
}
} }

View File

@ -14,10 +14,6 @@ Trunk (Unreleased)
MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions
with poor implementations of Object#hashCode(). (Radim Kolar via cutting) with poor implementations of Object#hashCode(). (Radim Kolar via cutting)
HADOOP-8562. Enhancements to support Hadoop on Windows Server and Windows
Azure environments. (See breakdown of tasks below for subtasks and
contributors)
IMPROVEMENTS IMPROVEMENTS
MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for
@ -75,9 +71,6 @@ Trunk (Unreleased)
MAPREDUCE-4735. Make arguments in TestDFSIO case insensitive. MAPREDUCE-4735. Make arguments in TestDFSIO case insensitive.
(Brandon Li via suresh) (Brandon Li via suresh)
MAPREDUCE-5014. Extend Distcp to accept a custom CopyListing.
(Srikanth Sundarrajan via amareshwari)
BUG FIXES BUG FIXES
MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant. MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
@ -162,31 +155,10 @@ Trunk (Unreleased)
MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen
via suresh) via suresh)
MAPREDUCE-5078. TestMRAppMaster fails on Windows due to mismatched path MAPREDUCE-5006. Fix failing streaming tests due to MAPREDUCE-4994.
separators. (Chris Nauroth via sseth) (Sandy Ryza via tomwhite)
BREAKDOWN OF HADOOP-8562 SUBTASKS Release 2.0.4-beta - UNRELEASED
MAPREDUCE-4739. Some MapReduce tests fail to find winutils.
(Chris Nauroth via suresh)
MAPREDUCE-4780. MapReduce distribution build fails on Windows.
(Chris Nauroth via suresh)
MAPREDUCE-4790. MapReduce build script would be more readable using abspath.
(Chris Nauroth via suresh)
MAPREDUCE-4869. Fix TestMapReduceChildJVM. (Chris Nauroth via acmurthy)
MAPREDUCE-4870. Fix TestMRJobsWithHistoryService. (Chris Nauroth via acmurthy)
MAPREDUCE-4983. Fixed various platform specific assumptions in various tests,
so that they can pass on Windows too. (Chris Nauroth via vinodkv)
HADOOP-9372. Fix bad timeout annotations on tests.
(Arpit Agarwal via suresh)
Release 2.0.5-beta - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -197,12 +169,6 @@ Release 2.0.5-beta - UNRELEASED
MAPREDUCE-5033. mapred shell script should respect usage flags MAPREDUCE-5033. mapred shell script should respect usage flags
(--help -help -h). (Andrew Wang via atm) (--help -help -h). (Andrew Wang via atm)
MAPREDUCE-4892. Modify CombineFileInputFormat to not skew input slits'
allocation on small clusters. (Bikas Saha via vinodkv)
MAPREDUCE-4990. Construct debug strings conditionally in
ShuffleHandler.Shuffle#sendMapOutput(). (kkambatl via tucu)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -229,42 +195,6 @@ Release 2.0.5-beta - UNRELEASED
MAPREDUCE-5008. Merger progress miscounts with respect to EOF_MARKER. MAPREDUCE-5008. Merger progress miscounts with respect to EOF_MARKER.
(Sandy Ryza via tomwhite) (Sandy Ryza via tomwhite)
MAPREDUCE-4693. History server should include counters for failed tasks.
(Xuan Gong via sseth)
MAPREDUCE-4896. mapred queue -info spits out ugly exception when queue does
not exist. (sandyr via tucu)
MAPREDUCE-3685. Fix bugs in MergeManager to ensure compression codec is
appropriately used and that on-disk segments are correctly sorted on
file-size. (Anty Rao and Ravi Prakash via acmurthy)
MAPREDUCE-4571. TestHsWebServicesJobs fails on jdk7. (tgraves via tucu)
MAPREDUCE-4716. TestHsWebServicesJobsQuery.testJobsQueryStateInvalid
fails with jdk7. (tgraves via tucu)
MAPREDUCE-5075. DistCp leaks input file handles since ThrottledInputStream
does not close the wrapped InputStream. (Chris Nauroth via szetszwo)
MAPREDUCE-3872. Fix an event handling races in ContainerLauncherImpl.
(Robert Kanter via sseth)
MAPREDUCE-5083. MiniMRCluster should use a random component when creating an
actual cluster (Siddharth Seth via hitesh)
Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
OPTIMIZATIONS
BUG FIXES
Release 2.0.3-alpha - 2013-02-06 Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -795,12 +725,6 @@ Release 0.23.7 - UNRELEASED
MAPREDUCE-4989. JSONify DataTables input data for Attempts page (Ravi MAPREDUCE-4989. JSONify DataTables input data for Attempts page (Ravi
Prakash via jlowe) Prakash via jlowe)
MAPREDUCE-5027. Shuffle does not limit number of outstanding connections
(Robert Parker via jeagles)
MAPREDUCE-4972. Coverage fixing for org.apache.hadoop.mapreduce.jobhistory
(Aleksey Gorshkov via bobby)
OPTIMIZATIONS OPTIMIZATIONS
MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
@ -820,27 +744,6 @@ Release 0.23.7 - UNRELEASED
MAPREDUCE-5009. Killing the Task Attempt slated for commit does not clear MAPREDUCE-5009. Killing the Task Attempt slated for commit does not clear
the value from the Task commitAttempt member (Robert Parker via jeagles) the value from the Task commitAttempt member (Robert Parker via jeagles)
MAPREDUCE-4871. AM uses mapreduce.jobtracker.split.metainfo.maxsize but
mapred-default has mapreduce.job.split.metainfo.maxsize (Jason Lowe via
jeagles)
MAPREDUCE-4794. DefaultSpeculator generates error messages on normal
shutdown (Jason Lowe via jeagles)
MAPREDUCE-5043. Fetch failure processing can cause AM event queue to
backup and eventually OOM (Jason Lowe via bobby)
MAPREDUCE-5023. History Server Web Services missing Job Counters (Ravi
Prakash via tgraves)
MAPREDUCE-5060. Fetch failures that time out only count against the first
map task (Robert Joseph Evans via jlowe)
MAPREDUCE-5042. Reducer unable to fetch for a map task that was recovered
(Jason Lowe via bobby)
MAPREDUCE-5053. java.lang.InternalError from decompression codec causes
reducer to fail (Robert Parker via jeagles)
Release 0.23.6 - UNRELEASED Release 0.23.6 - UNRELEASED
View File
@ -161,6 +161,7 @@ public class MRAppMaster extends CompositeService {
private final int nmPort; private final int nmPort;
private final int nmHttpPort; private final int nmHttpPort;
protected final MRAppMetrics metrics; protected final MRAppMetrics metrics;
private final int maxAppAttempts;
private Map<TaskId, TaskInfo> completedTasksFromPreviousRun; private Map<TaskId, TaskInfo> completedTasksFromPreviousRun;
private List<AMInfo> amInfos; private List<AMInfo> amInfos;
private AppContext context; private AppContext context;
@ -194,14 +195,14 @@ public class MRAppMaster extends CompositeService {
public MRAppMaster(ApplicationAttemptId applicationAttemptId, public MRAppMaster(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
long appSubmitTime) { long appSubmitTime, int maxAppAttempts) {
this(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort, this(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort,
new SystemClock(), appSubmitTime); new SystemClock(), appSubmitTime, maxAppAttempts);
} }
public MRAppMaster(ApplicationAttemptId applicationAttemptId, public MRAppMaster(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String nmHost, int nmPort, int nmHttpPort, ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
Clock clock, long appSubmitTime) { Clock clock, long appSubmitTime, int maxAppAttempts) {
super(MRAppMaster.class.getName()); super(MRAppMaster.class.getName());
this.clock = clock; this.clock = clock;
this.startTime = clock.getTime(); this.startTime = clock.getTime();
@ -212,6 +213,7 @@ public class MRAppMaster extends CompositeService {
this.nmPort = nmPort; this.nmPort = nmPort;
this.nmHttpPort = nmHttpPort; this.nmHttpPort = nmHttpPort;
this.metrics = MRAppMetrics.create(); this.metrics = MRAppMetrics.create();
this.maxAppAttempts = maxAppAttempts;
LOG.info("Created MRAppMaster for application " + applicationAttemptId); LOG.info("Created MRAppMaster for application " + applicationAttemptId);
} }
@ -221,17 +223,12 @@ public class MRAppMaster extends CompositeService {
downloadTokensAndSetupUGI(conf); downloadTokensAndSetupUGI(conf);
//TODO this is a hack, we really need the RM to inform us when we isLastAMRetry = appAttemptID.getAttemptId() >= maxAppAttempts;
// are the last one. This would allow us to configure retries on LOG.info("The specific max attempts: " + maxAppAttempts +
// a per application basis. " for application: " + appAttemptID.getApplicationId().getId() +
int numAMRetries = conf.getInt(YarnConfiguration.RM_AM_MAX_RETRIES, ". Attempt num: " + appAttemptID.getAttemptId() +
YarnConfiguration.DEFAULT_RM_AM_MAX_RETRIES);
isLastAMRetry = appAttemptID.getAttemptId() >= numAMRetries;
LOG.info("AM Retries: " + numAMRetries +
" attempt num: " + appAttemptID.getAttemptId() +
" is last retry: " + isLastAMRetry); " is last retry: " + isLastAMRetry);
context = new RunningAppContext(conf); context = new RunningAppContext(conf);
// Job name is the same as the app name until we support DAG of jobs // Job name is the same as the app name until we support DAG of jobs
@ -266,6 +263,9 @@ public class MRAppMaster extends CompositeService {
boolean commitFailure = fs.exists(endCommitFailureFile); boolean commitFailure = fs.exists(endCommitFailureFile);
if(!stagingExists) { if(!stagingExists) {
isLastAMRetry = true; isLastAMRetry = true;
LOG.info("Attempt num: " + appAttemptID.getAttemptId() +
" is last retry: " + isLastAMRetry +
" because the staging dir doesn't exist.");
errorHappenedShutDown = true; errorHappenedShutDown = true;
forcedState = JobStateInternal.ERROR; forcedState = JobStateInternal.ERROR;
shutDownMessage = "Staging dir does not exist " + stagingDir; shutDownMessage = "Staging dir does not exist " + stagingDir;
@ -275,6 +275,9 @@ public class MRAppMaster extends CompositeService {
// what result we will use to notify, and how we will unregister // what result we will use to notify, and how we will unregister
errorHappenedShutDown = true; errorHappenedShutDown = true;
isLastAMRetry = true; isLastAMRetry = true;
LOG.info("Attempt num: " + appAttemptID.getAttemptId() +
" is last retry: " + isLastAMRetry +
" because a commit was started.");
copyHistory = true; copyHistory = true;
if (commitSuccess) { if (commitSuccess) {
shutDownMessage = "We crashed after successfully committing. Recovering."; shutDownMessage = "We crashed after successfully committing. Recovering.";
@ -777,6 +780,10 @@ public class MRAppMaster extends CompositeService {
return taskAttemptListener; return taskAttemptListener;
} }
public Boolean isLastAMRetry() {
return isLastAMRetry;
}
/** /**
* By the time life-cycle of this router starts, job-init would have already * By the time life-cycle of this router starts, job-init would have already
* happened. * happened.
@ -1206,6 +1213,8 @@ public class MRAppMaster extends CompositeService {
System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV); System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV);
String appSubmitTimeStr = String appSubmitTimeStr =
System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV); System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);
String maxAppAttempts =
System.getenv(ApplicationConstants.MAX_APP_ATTEMPTS_ENV);
validateInputParam(containerIdStr, validateInputParam(containerIdStr,
ApplicationConstants.AM_CONTAINER_ID_ENV); ApplicationConstants.AM_CONTAINER_ID_ENV);
@ -1215,6 +1224,8 @@ public class MRAppMaster extends CompositeService {
ApplicationConstants.NM_HTTP_PORT_ENV); ApplicationConstants.NM_HTTP_PORT_ENV);
validateInputParam(appSubmitTimeStr, validateInputParam(appSubmitTimeStr,
ApplicationConstants.APP_SUBMIT_TIME_ENV); ApplicationConstants.APP_SUBMIT_TIME_ENV);
validateInputParam(maxAppAttempts,
ApplicationConstants.MAX_APP_ATTEMPTS_ENV);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
ApplicationAttemptId applicationAttemptId = ApplicationAttemptId applicationAttemptId =
@ -1224,7 +1235,8 @@ public class MRAppMaster extends CompositeService {
MRAppMaster appMaster = MRAppMaster appMaster =
new MRAppMaster(applicationAttemptId, containerId, nodeHostString, new MRAppMaster(applicationAttemptId, containerId, nodeHostString,
Integer.parseInt(nodePortString), Integer.parseInt(nodePortString),
Integer.parseInt(nodeHttpPortString), appSubmitTime); Integer.parseInt(nodeHttpPortString), appSubmitTime,
Integer.parseInt(maxAppAttempts));
ShutdownHookManager.get().addShutdownHook( ShutdownHookManager.get().addShutdownHook(
new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY); new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration(new JobConf()); YarnConfiguration conf = new YarnConfiguration(new JobConf());
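For context, the hunks above switch the AM from reading YarnConfiguration.RM_AM_MAX_RETRIES to taking the attempt limit from an environment variable exported by its launcher, then validating it alongside the other launch parameters. A minimal sketch of that pattern, with a hypothetical variable name and helper standing in for ApplicationConstants.MAX_APP_ATTEMPTS_ENV and validateInputParam, not the MRAppMaster code itself:

// Sketch: read a required launcher-provided environment variable,
// fail fast if it is missing, then parse it as an integer limit.
public final class MaxAttemptsEnvSketch {
  private MaxAttemptsEnvSketch() {}

  static String requireEnv(String name) {
    String value = System.getenv(name);
    if (value == null || value.isEmpty()) {
      throw new IllegalStateException(name + " is null or empty");
    }
    return value;
  }

  public static void main(String[] args) {
    // "MAX_APP_ATTEMPTS" is an assumed name used for illustration only.
    int maxAppAttempts = Integer.parseInt(requireEnv("MAX_APP_ATTEMPTS"));
    // Attempt ids are 1-based, so the last permitted attempt equals the limit.
    boolean lastRetry = 2 >= maxAppAttempts;   // 2 = example attempt id
    System.out.println("max=" + maxAppAttempts + ", lastRetry=" + lastRetry);
  }
}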
View File
@ -54,7 +54,6 @@ import org.apache.hadoop.yarn.util.BuilderUtils;
public abstract class RMContainerRequestor extends RMCommunicator { public abstract class RMContainerRequestor extends RMCommunicator {
private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class); private static final Log LOG = LogFactory.getLog(RMContainerRequestor.class);
static final String ANY = "*";
private int lastResponseID; private int lastResponseID;
private Resource availableResources; private Resource availableResources;
@ -278,7 +277,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
} }
// Off-switch // Off-switch
addResourceRequest(req.priority, ANY, req.capability); addResourceRequest(req.priority, ResourceRequest.ANY, req.capability);
} }
protected void decContainerReq(ContainerRequest req) { protected void decContainerReq(ContainerRequest req) {
@ -291,7 +290,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
decResourceRequest(req.priority, rack, req.capability); decResourceRequest(req.priority, rack, req.capability);
} }
decResourceRequest(req.priority, ANY, req.capability); decResourceRequest(req.priority, ResourceRequest.ANY, req.capability);
} }
private void addResourceRequest(Priority priority, String resourceName, private void addResourceRequest(Priority priority, String resourceName,
View File
@ -32,6 +32,8 @@ import static org.mockito.Mockito.when;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.TypeConverter;
@ -78,7 +80,7 @@ public class TestTaskAttemptListenerImpl {
} }
} }
@Test @Test (timeout=5000)
public void testGetTask() throws IOException { public void testGetTask() throws IOException {
AppContext appCtx = mock(AppContext.class); AppContext appCtx = mock(AppContext.class);
JobTokenSecretManager secret = mock(JobTokenSecretManager.class); JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
@ -136,9 +138,30 @@ public class TestTaskAttemptListenerImpl {
assertTrue(result.shouldDie); assertTrue(result.shouldDie);
listener.stop(); listener.stop();
// test JVMID
JVMId jvmid = JVMId.forName("jvm_001_002_m_004");
assertNotNull(jvmid);
try {
JVMId.forName("jvm_001_002_m_004_006");
Assert.fail();
} catch (IllegalArgumentException e) {
assertEquals(e.getMessage(),
"TaskId string : jvm_001_002_m_004_006 is not properly formed");
} }
@Test }
@Test (timeout=5000)
public void testJVMId() {
JVMId jvmid = new JVMId("test", 1, true, 2);
JVMId jvmid1 = JVMId.forName("jvm_test_0001_m_000002");
// the compare method should report the two JVMIds as equal
assertEquals(0, jvmid.compareTo(jvmid1));
}
@Test (timeout=10000)
public void testGetMapCompletionEvents() throws IOException { public void testGetMapCompletionEvents() throws IOException {
TaskAttemptCompletionEvent[] empty = {}; TaskAttemptCompletionEvent[] empty = {};
TaskAttemptCompletionEvent[] taskEvents = { TaskAttemptCompletionEvent[] taskEvents = {
@ -205,7 +228,7 @@ public class TestTaskAttemptListenerImpl {
return tce; return tce;
} }
@Test @Test (timeout=1000)
public void testCommitWindow() throws IOException { public void testCommitWindow() throws IOException {
SystemClock clock = new SystemClock(); SystemClock clock = new SystemClock();
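Several of the hunks above only add a timeout argument to existing JUnit 4 @Test annotations, so a hung test fails on its own instead of stalling the build. A self-contained example of the pattern (the class and test names are illustrative):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TimeoutAnnotationExample {
  // JUnit 4 aborts and fails the test if the body runs longer than 5 seconds.
  @Test(timeout = 5000)
  public void completesWellWithinTheLimit() {
    assertEquals(4, 2 + 2);
  }
}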
View File
@ -192,7 +192,7 @@ public class MRApp extends MRAppMaster {
int maps, int reduces, boolean autoComplete, String testName, int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount, Clock clock) { boolean cleanOnStart, int startCount, Clock clock) {
super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System
.currentTimeMillis()); .currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
this.testWorkDir = new File("target", testName); this.testWorkDir = new File("target", testName);
testAbsPath = new Path(testWorkDir.getAbsolutePath()); testAbsPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("PathUsed: " + testAbsPath); LOG.info("PathUsed: " + testAbsPath);
View File
@ -230,7 +230,7 @@ public class MRAppBenchmark {
List<ResourceRequest> askList = request.getAskList(); List<ResourceRequest> askList = request.getAskList();
List<Container> containers = new ArrayList<Container>(); List<Container> containers = new ArrayList<Container>();
for (ResourceRequest req : askList) { for (ResourceRequest req : askList) {
if (req.getHostName() != "*") { if (!ResourceRequest.isAnyLocation(req.getHostName())) {
continue; continue;
} }
int numContainers = req.getNumContainers(); int numContainers = req.getNumContainers();
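Two of the hunks above replace ad-hoc uses of the wildcard resource name: RMContainerRequestor drops its private ANY = "*" constant in favour of the shared ResourceRequest.ANY, and MRAppBenchmark swaps the reference-equality check req.getHostName() != "*" for ResourceRequest.isAnyLocation(...). A small standalone sketch of the safer pattern (the Wildcard class is a hypothetical stand-in for ResourceRequest):

// Sketch: keep the wildcard in one named constant and compare with
// equals(), not == or !=, which only test reference identity.
final class Wildcard {               // hypothetical stand-in for ResourceRequest
  static final String ANY = "*";
  static boolean isAnyLocation(String hostName) {
    return ANY.equals(hostName);
  }
}

public class AnyLocationSketch {
  public static void main(String[] args) {
    System.out.println(Wildcard.isAnyLocation("*"));              // true
    System.out.println(Wildcard.isAnyLocation(new String("*")));  // still true
    System.out.println(Wildcard.isAnyLocation("rack1/host7"));    // false
  }
}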
View File
@ -30,11 +30,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent;
import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler;
import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.RMHeartbeatHandler;
import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -42,6 +46,7 @@ import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Before; import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
@ -80,7 +85,7 @@ public class TestMRAppMaster {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMasterTest appMaster = MRAppMasterTest appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis()); System.currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
YarnConfiguration conf = new YarnConfiguration(); YarnConfiguration conf = new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir); conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName); MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@ -109,7 +114,8 @@ public class TestMRAppMaster {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster = MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false); System.currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS,
false, false);
boolean caught = false; boolean caught = false;
try { try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName); MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@ -144,7 +150,8 @@ public class TestMRAppMaster {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster = MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false); System.currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS,
false, false);
boolean caught = false; boolean caught = false;
try { try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName); MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@ -179,7 +186,8 @@ public class TestMRAppMaster {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster = MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false); System.currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS,
false, false);
boolean caught = false; boolean caught = false;
try { try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName); MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@ -214,7 +222,8 @@ public class TestMRAppMaster {
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
MRAppMaster appMaster = MRAppMaster appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1, new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), false); System.currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS,
false, false);
boolean caught = false; boolean caught = false;
try { try {
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName); MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@ -230,36 +239,73 @@ public class TestMRAppMaster {
assertEquals(JobStateInternal.ERROR, appMaster.forcedState); assertEquals(JobStateInternal.ERROR, appMaster.forcedState);
appMaster.stop(); appMaster.stop();
} }
@Test (timeout = 30000)
public void testMRAppMasterMaxAppAttempts() throws IOException,
InterruptedException {
int[] maxAppAttempts = new int[] { 1, 2, 3 };
Boolean[] expectedBools = new Boolean[]{ true, true, false };
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000002";
String containerIdStr = "container_1317529182569_0004_000002_1";
String userName = "TestAppMasterUser";
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
YarnConfiguration conf = new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
File stagingDir =
new File(MRApps.getStagingAreaDir(conf, userName).toString());
stagingDir.mkdirs();
for (int i = 0; i < maxAppAttempts.length; ++i) {
MRAppMasterTest appMaster =
new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
System.currentTimeMillis(), maxAppAttempts[i], false, true);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
assertEquals("isLastAMRetry is correctly computed.", expectedBools[i],
appMaster.isLastAMRetry());
}
}
} }
class MRAppMasterTest extends MRAppMaster { class MRAppMasterTest extends MRAppMaster {
Path stagingDirPath; Path stagingDirPath;
private Configuration conf; private Configuration conf;
private boolean overrideInitAndStart; private boolean overrideInit;
private boolean overrideStart;
ContainerAllocator mockContainerAllocator; ContainerAllocator mockContainerAllocator;
CommitterEventHandler mockCommitterEventHandler;
RMHeartbeatHandler mockRMHeartbeatHandler;
public MRAppMasterTest(ApplicationAttemptId applicationAttemptId, public MRAppMasterTest(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String host, int port, int httpPort, ContainerId containerId, String host, int port, int httpPort,
long submitTime) { long submitTime, int maxAppAttempts) {
this(applicationAttemptId, containerId, host, port, httpPort, submitTime, this(applicationAttemptId, containerId, host, port, httpPort,
true); submitTime, maxAppAttempts, true, true);
} }
public MRAppMasterTest(ApplicationAttemptId applicationAttemptId, public MRAppMasterTest(ApplicationAttemptId applicationAttemptId,
ContainerId containerId, String host, int port, int httpPort, ContainerId containerId, String host, int port, int httpPort,
long submitTime, boolean overrideInitAndStart) { long submitTime, int maxAppAttempts, boolean overrideInit,
super(applicationAttemptId, containerId, host, port, httpPort, submitTime); boolean overrideStart) {
this.overrideInitAndStart = overrideInitAndStart; super(applicationAttemptId, containerId, host, port, httpPort, submitTime,
maxAppAttempts);
this.overrideInit = overrideInit;
this.overrideStart = overrideStart;
mockContainerAllocator = mock(ContainerAllocator.class); mockContainerAllocator = mock(ContainerAllocator.class);
mockCommitterEventHandler = mock(CommitterEventHandler.class);
mockRMHeartbeatHandler = mock(RMHeartbeatHandler.class);
} }
@Override @Override
public void init(Configuration conf) { public void init(Configuration conf) {
if (overrideInitAndStart) { if (!overrideInit) {
this.conf = conf;
} else {
super.init(conf); super.init(conf);
} }
this.conf = conf;
} }
@Override @Override
@ -277,9 +323,20 @@ class MRAppMasterTest extends MRAppMaster {
return mockContainerAllocator; return mockContainerAllocator;
} }
@Override
protected EventHandler<CommitterEvent> createCommitterEventHandler(
AppContext context, OutputCommitter committer) {
return mockCommitterEventHandler;
}
@Override
protected RMHeartbeatHandler getRMHeartbeatHandler() {
return mockRMHeartbeatHandler;
}
@Override @Override
public void start() { public void start() {
if (overrideInitAndStart) { if (overrideStart) {
try { try {
String user = UserGroupInformation.getCurrentUser().getShortUserName(); String user = UserGroupInformation.getCurrentUser().getShortUserName();
stagingDirPath = MRApps.getStagingAreaDir(conf, user); stagingDirPath = MRApps.getStagingAreaDir(conf, user);
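The new testMRAppMasterMaxAppAttempts above is table-driven: parallel arrays of inputs and expected results, checked in a loop. The same shape in a stripped-down form, exercising only the attempt-vs-limit rule rather than a real MRAppMaster (names here are illustrative):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class TableDrivenRetryTest {
  // Mirrors the rule the test above exercises through MRAppMaster.
  private static boolean isLastRetry(int attemptId, int maxAttempts) {
    return attemptId >= maxAttempts;
  }

  @Test(timeout = 30000)
  public void secondAttemptAgainstSeveralLimits() {
    int[] maxAttempts = { 1, 2, 3 };
    boolean[] expected = { true, true, false };
    for (int i = 0; i < maxAttempts.length; i++) {
      assertEquals("maxAttempts=" + maxAttempts[i],
          expected[i], isLastRetry(2, maxAttempts[i]));
    }
  }
}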
View File
@ -49,7 +49,6 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.service.AbstractService;
@ -93,10 +92,9 @@ import org.junit.Test;
verify(fs).delete(stagingJobPath, true); verify(fs).delete(stagingJobPath, true);
} }
@Test @Test (timeout = 30000)
public void testDeletionofStagingOnKill() throws IOException { public void testDeletionofStagingOnKill() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir); conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 4);
fs = mock(FileSystem.class); fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true); when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists //Staging Dir exists
@ -113,7 +111,7 @@ import org.junit.Test;
JobId jobid = recordFactory.newRecordInstance(JobId.class); JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId); jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class); ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc, 4);
appMaster.init(conf); appMaster.init(conf);
//simulate the process being killed //simulate the process being killed
MRAppMaster.MRAppMasterShutdownHook hook = MRAppMaster.MRAppMasterShutdownHook hook =
@ -122,10 +120,9 @@ import org.junit.Test;
verify(fs, times(0)).delete(stagingJobPath, true); verify(fs, times(0)).delete(stagingJobPath, true);
} }
@Test @Test (timeout = 30000)
public void testDeletionofStagingOnKillLastTry() throws IOException { public void testDeletionofStagingOnKillLastTry() throws IOException {
conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir); conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, 1);
fs = mock(FileSystem.class); fs = mock(FileSystem.class);
when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true); when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
//Staging Dir exists //Staging Dir exists
@ -142,7 +139,8 @@ import org.junit.Test;
JobId jobid = recordFactory.newRecordInstance(JobId.class); JobId jobid = recordFactory.newRecordInstance(JobId.class);
jobid.setAppId(appId); jobid.setAppId(appId);
ContainerAllocator mockAlloc = mock(ContainerAllocator.class); ContainerAllocator mockAlloc = mock(ContainerAllocator.class);
MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc); MRAppMaster appMaster = new TestMRApp(attemptId, mockAlloc,
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
appMaster.init(conf); appMaster.init(conf);
//simulate the process being killed //simulate the process being killed
MRAppMaster.MRAppMasterShutdownHook hook = MRAppMaster.MRAppMasterShutdownHook hook =
@ -155,15 +153,16 @@ import org.junit.Test;
ContainerAllocator allocator; ContainerAllocator allocator;
public TestMRApp(ApplicationAttemptId applicationAttemptId, public TestMRApp(ApplicationAttemptId applicationAttemptId,
ContainerAllocator allocator) { ContainerAllocator allocator, int maxAppAttempts) {
super(applicationAttemptId, BuilderUtils.newContainerId( super(applicationAttemptId, BuilderUtils.newContainerId(
applicationAttemptId, 1), "testhost", 2222, 3333, System applicationAttemptId, 1), "testhost", 2222, 3333,
.currentTimeMillis()); System.currentTimeMillis(), maxAppAttempts);
this.allocator = allocator; this.allocator = allocator;
} }
public TestMRApp(ApplicationAttemptId applicationAttemptId) { public TestMRApp(ApplicationAttemptId applicationAttemptId) {
this(applicationAttemptId, null); this(applicationAttemptId, null,
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
} }
@Override @Override
View File
@ -37,9 +37,7 @@ public class LocalClientProtocolProvider extends ClientProtocolProvider {
if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) { if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) {
return null; return null;
} }
if (conf.get("mapreduce.job.maps") == null) { conf.setInt(JobContext.NUM_MAPS, 1);
conf.setInt("mapreduce.job.maps", 1);
}
return new LocalJobRunner(conf); return new LocalJobRunner(conf);
} }
View File
@ -19,8 +19,6 @@
package org.apache.hadoop.mapred; package org.apache.hadoop.mapred;
import java.io.IOException; import java.io.IOException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed; import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
@ -39,62 +37,7 @@ public class JobEndNotifier {
private static final Log LOG = private static final Log LOG =
LogFactory.getLog(JobEndNotifier.class.getName()); LogFactory.getLog(JobEndNotifier.class.getName());
private static Thread thread;
private static volatile boolean running;
private static BlockingQueue<JobEndStatusInfo> queue =
new DelayQueue<JobEndStatusInfo>();
public static void startNotifier() {
running = true;
thread = new Thread(
new Runnable() {
public void run() {
try {
while (running) {
sendNotification(queue.take());
}
}
catch (InterruptedException irex) {
if (running) {
LOG.error("Thread has ended unexpectedly", irex);
}
}
}
private void sendNotification(JobEndStatusInfo notification) {
try {
int code = httpNotification(notification.getUri());
if (code != 200) {
throw new IOException("Invalid response status code: " + code);
}
}
catch (IOException ioex) {
LOG.error("Notification failure [" + notification + "]", ioex);
if (notification.configureForRetry()) {
try {
queue.put(notification);
}
catch (InterruptedException iex) {
LOG.error("Notification queuing error [" + notification + "]",
iex);
}
}
}
catch (Exception ex) {
LOG.error("Notification failure [" + notification + "]", ex);
}
}
}
);
thread.start();
}
public static void stopNotifier() {
running = false;
thread.interrupt();
}
private static JobEndStatusInfo createNotification(JobConf conf, private static JobEndStatusInfo createNotification(JobConf conf,
JobStatus status) { JobStatus status) {
@ -118,18 +61,6 @@ public class JobEndNotifier {
return notification; return notification;
} }
public static void registerNotification(JobConf jobConf, JobStatus status) {
JobEndStatusInfo notification = createNotification(jobConf, status);
if (notification != null) {
try {
queue.put(notification);
}
catch (InterruptedException iex) {
LOG.error("Notification queuing failure [" + notification + "]", iex);
}
}
}
private static int httpNotification(String uri) throws IOException { private static int httpNotification(String uri) throws IOException {
URI url = new URI(uri, false); URI url = new URI(uri, false);
HttpClient m_client = new HttpClient(); HttpClient m_client = new HttpClient();
@ -194,10 +125,6 @@ public class JobEndNotifier {
return retryInterval; return retryInterval;
} }
public long getDelayTime() {
return delayTime;
}
public boolean configureForRetry() { public boolean configureForRetry() {
boolean retry = false; boolean retry = false;
if (getRetryAttempts() > 0) { if (getRetryAttempts() > 0) {
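The block removed above was a background notifier built on a DelayQueue: failed notifications were re-queued with a delay and picked up again by a single thread. A compact, generic sketch of that queue behaviour, independent of the remaining JobEndNotifier API:

import java.util.concurrent.DelayQueue;
import java.util.concurrent.Delayed;
import java.util.concurrent.TimeUnit;

// Sketch: elements only become visible to take() after their delay has
// elapsed, which is how the removed notifier spaced out its retries.
public class DelayQueueRetrySketch {

  static final class DelayedNotification implements Delayed {
    final String uri;
    final long readyAtMillis;

    DelayedNotification(String uri, long delayMillis) {
      this.uri = uri;
      this.readyAtMillis = System.currentTimeMillis() + delayMillis;
    }

    @Override
    public long getDelay(TimeUnit unit) {
      return unit.convert(readyAtMillis - System.currentTimeMillis(),
          TimeUnit.MILLISECONDS);
    }

    @Override
    public int compareTo(Delayed other) {
      long diff = getDelay(TimeUnit.MILLISECONDS)
          - other.getDelay(TimeUnit.MILLISECONDS);
      return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    DelayQueue<DelayedNotification> queue = new DelayQueue<DelayedNotification>();
    queue.put(new DelayedNotification("http://example.org/jobend", 200));
    DelayedNotification n = queue.take();   // blocks until the 200 ms elapse
    System.out.println("would retry notification to " + n.uri);
  }
}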
View File
@ -40,7 +40,6 @@ import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.List;
import java.net.URL; import java.net.URL;
@ -487,72 +486,7 @@ public class QueueManager {
new QueueAclsInfo[queueAclsInfolist.size()]); new QueueAclsInfo[queueAclsInfolist.size()]);
} }
/**
* ONLY FOR TESTING - Do not use in production code.
* This method is used for setting up of leafQueues only.
* We are not setting the hierarchy here.
*
* @param queues
*/
synchronized void setQueues(Queue[] queues) {
root.getChildren().clear();
leafQueues.clear();
allQueues.clear();
for (Queue queue : queues) {
root.addChild(queue);
}
//At this point we have root populated
//update data structures leafNodes.
leafQueues = getRoot().getLeafQueues();
allQueues.putAll(getRoot().getInnerQueues());
allQueues.putAll(leafQueues);
}
/**
* Return an array of {@link JobQueueInfo} objects for the root
* queues configured in the system.
* <p/>
* Root queues are queues that are at the top-most level in the
* hierarchy of queues in mapred-queues.xml, or they are the queues
* configured in the mapred.queue.names key in mapred-site.xml.
*
* @return array of JobQueueInfo objects for root level queues.
*/
JobQueueInfo[] getRootQueues() {
List<JobQueueInfo> list = getRoot().getJobQueueInfo().getChildren();
return list.toArray(new JobQueueInfo[list.size()]);
}
/**
* Get the complete hierarchy of children for queue
* queueName
*
* @param queueName
* @return
*/
JobQueueInfo[] getChildQueues(String queueName) {
List<JobQueueInfo> list =
allQueues.get(queueName).getJobQueueInfo().getChildren();
if (list != null) {
return list.toArray(new JobQueueInfo[list.size()]);
} else {
return new JobQueueInfo[0];
}
}
/**
* Used only for testing purposes .
* This method is unstable as refreshQueues would leave this
* data structure in unstable state.
*
* @param queueName
* @return
*/
Queue getQueue(String queueName) {
return this.allQueues.get(queueName);
}
/** /**
@ -573,28 +507,6 @@ public class QueueManager {
return root; return root;
} }
/**
* Returns the specific queue ACL for the given queue.
* Returns null if the given queue does not exist or the acl is not
* configured for that queue.
* If acls are disabled(mapreduce.cluster.acls.enabled set to false), returns
* ACL with all users.
*/
synchronized AccessControlList getQueueACL(String queueName,
QueueACL qACL) {
if (areAclsEnabled) {
Queue q = leafQueues.get(queueName);
if (q != null) {
return q.getAcls().get(toFullPropertyName(
queueName, qACL.getAclName()));
}
else {
LOG.warn("Queue " + queueName + " is not present.");
return null;
}
}
return new AccessControlList("*");
}
/** /**
* Dumps the configuration of hierarchy of queues * Dumps the configuration of hierarchy of queues
View File
@ -386,73 +386,6 @@ public class TaskLog {
return conf.getLong(JobContext.TASK_USERLOG_LIMIT, 0) * 1024; return conf.getLong(JobContext.TASK_USERLOG_LIMIT, 0) * 1024;
} }
/**
* Wrap a command in a shell to capture stdout and stderr to files.
* If the tailLength is 0, the entire output will be saved.
* @param cmd The command and the arguments that should be run
* @param stdoutFilename The filename that stdout should be saved to
* @param stderrFilename The filename that stderr should be saved to
* @param tailLength The length of the tail to be saved.
* @return the modified command that should be run
*/
public static List<String> captureOutAndError(List<String> cmd,
File stdoutFilename,
File stderrFilename,
long tailLength
) throws IOException {
return captureOutAndError(null, cmd, stdoutFilename,
stderrFilename, tailLength, false);
}
/**
* Wrap a command in a shell to capture stdout and stderr to files.
* Setup commands such as setting memory limit can be passed which
* will be executed before exec.
* If the tailLength is 0, the entire output will be saved.
* @param setup The setup commands for the execed process.
* @param cmd The command and the arguments that should be run
* @param stdoutFilename The filename that stdout should be saved to
* @param stderrFilename The filename that stderr should be saved to
* @param tailLength The length of the tail to be saved.
* @return the modified command that should be run
*/
public static List<String> captureOutAndError(List<String> setup,
List<String> cmd,
File stdoutFilename,
File stderrFilename,
long tailLength
) throws IOException {
return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename,
tailLength, false);
}
/**
* Wrap a command in a shell to capture stdout and stderr to files.
* Setup commands such as setting memory limit can be passed which
* will be executed before exec.
* If the tailLength is 0, the entire output will be saved.
* @param setup The setup commands for the execed process.
* @param cmd The command and the arguments that should be run
* @param stdoutFilename The filename that stdout should be saved to
* @param stderrFilename The filename that stderr should be saved to
* @param tailLength The length of the tail to be saved.
* @param pidFileName The name of the pid-file. pid-file's usage is deprecated
* @return the modified command that should be run
*
* @deprecated pidFiles are no more used. Instead pid is exported to
* env variable JVM_PID.
*/
@Deprecated
public static List<String> captureOutAndError(List<String> setup,
List<String> cmd,
File stdoutFilename,
File stderrFilename,
long tailLength,
String pidFileName
) throws IOException {
return captureOutAndError(setup, cmd, stdoutFilename, stderrFilename,
tailLength, false);
}
/** /**
* Wrap a command in a shell to capture stdout and stderr to files. * Wrap a command in a shell to capture stdout and stderr to files.
@ -607,25 +540,6 @@ public class TaskLog {
return command.toString(); return command.toString();
} }
/**
* Wrap a command in a shell to capture debug script's
* stdout and stderr to debugout.
* @param cmd The command and the arguments that should be run
* @param debugoutFilename The filename that stdout and stderr
* should be saved to.
* @return the modified command that should be run
* @throws IOException
*/
public static List<String> captureDebugOut(List<String> cmd,
File debugoutFilename
) throws IOException {
String debugout = FileUtil.makeShellPath(debugoutFilename);
List<String> result = new ArrayList<String>(3);
result.add(bashCommand);
result.add("-c");
result.add(buildDebugScriptCommandLine(cmd, debugout));
return result;
}
/** /**
* Method to return the location of user log directory. * Method to return the location of user log directory.
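The overloads removed above all delegated to one helper that wraps a command in a shell so its stdout and stderr end up in files. A stripped-down sketch of that wrapping idea using plain ProcessBuilder, not the TaskLog API (the file names and command are examples, and tail-length handling is omitted):

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

// Sketch: run a command under bash -c with stdout and stderr redirected
// to files, which is the essence of what captureOutAndError built.
public class CaptureOutAndErrorSketch {
  public static void main(String[] args)
      throws IOException, InterruptedException {
    File stdout = new File("stdout.log");
    File stderr = new File("stderr.log");
    String cmd = "echo hello; echo oops 1>&2";
    List<String> wrapped = Arrays.asList("bash", "-c",
        "( " + cmd + " ) 1>" + stdout.getPath() + " 2>" + stderr.getPath());
    int exitCode = new ProcessBuilder(wrapped).start().waitFor();
    System.out.println("exit code: " + exitCode);
  }
}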
View File
@ -523,17 +523,5 @@ public abstract class TaskStatus implements Writable, Cloneable {
return (isMap) ? new MapTaskStatus() : new ReduceTaskStatus(); return (isMap) ? new MapTaskStatus() : new ReduceTaskStatus();
} }
static TaskStatus readTaskStatus(DataInput in) throws IOException {
boolean isMap = in.readBoolean();
TaskStatus taskStatus = createTaskStatus(isMap);
taskStatus.readFields(in);
return taskStatus;
}
static void writeTaskStatus(DataOutput out, TaskStatus taskStatus)
throws IOException {
out.writeBoolean(taskStatus.getIsMap());
taskStatus.write(out);
}
} }
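The pair of helpers removed above used the usual framing for polymorphic records: write a boolean discriminator, let the chosen concrete type serialize its own fields, and reverse the steps on read. A self-contained sketch of that framing with plain java.io streams (the float field is only an example, not the real TaskStatus layout):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch: a leading boolean picks which kind of record follows, mirroring
// the isMap flag written by the removed writeTaskStatus().
public class DiscriminatorFramingSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeBoolean(true);    // discriminator: map-side record
    out.writeFloat(0.75f);     // the record's own example field
    out.close();

    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bytes.toByteArray()));
    boolean isMap = in.readBoolean();
    float progress = in.readFloat();
    System.out.println((isMap ? "map" : "reduce") + " progress=" + progress);
  }
}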
View File
@ -664,4 +664,12 @@ public interface MRJobConfig {
public static final String WORKFLOW_ADJACENCY_PREFIX_PATTERN = public static final String WORKFLOW_ADJACENCY_PREFIX_PATTERN =
"^mapreduce\\.workflow\\.adjacency\\..+"; "^mapreduce\\.workflow\\.adjacency\\..+";
/**
* The maximum number of application attempts.
* It is an application-specific setting.
*/
public static final String MR_AM_MAX_ATTEMPTS = "mapreduce.am.max-attempts";
public static final int DEFAULT_MR_AM_MAX_ATTEMPTS = 1;
} }
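The new key is an ordinary job property, so it can be set on a Configuration before submission; a minimal sketch (the value 3 is only an example, and the default shown above applies when the key is absent):

import org.apache.hadoop.conf.Configuration;

// Sketch: set and read back the per-job AM attempt limit added above.
public class MaxAttemptsConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("mapreduce.am.max-attempts", 3);   // MRJobConfig.MR_AM_MAX_ATTEMPTS
    System.out.println(conf.getInt("mapreduce.am.max-attempts",
        1 /* DEFAULT_MR_AM_MAX_ATTEMPTS */));
  }
}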
View File
@ -1,418 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Charsets;
/**
* Plugin to calculate resource information on Linux systems.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LinuxResourceCalculatorPlugin extends ResourceCalculatorPlugin {
private static final Log LOG =
LogFactory.getLog(LinuxResourceCalculatorPlugin.class);
public static final int UNAVAILABLE = -1;
/**
* proc's meminfo virtual file has keys-values in the format
* "key:[ \t]*value[ \t]kB".
*/
private static final String PROCFS_MEMFILE = "/proc/meminfo";
private static final Pattern PROCFS_MEMFILE_FORMAT =
Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");
// We need the values for the following keys in meminfo
private static final String MEMTOTAL_STRING = "MemTotal";
private static final String SWAPTOTAL_STRING = "SwapTotal";
private static final String MEMFREE_STRING = "MemFree";
private static final String SWAPFREE_STRING = "SwapFree";
private static final String INACTIVE_STRING = "Inactive";
/**
* Patterns for parsing /proc/cpuinfo
*/
private static final String PROCFS_CPUINFO = "/proc/cpuinfo";
private static final Pattern PROCESSOR_FORMAT =
Pattern.compile("^processor[ \t]:[ \t]*([0-9]*)");
private static final Pattern FREQUENCY_FORMAT =
Pattern.compile("^cpu MHz[ \t]*:[ \t]*([0-9.]*)");
/**
* Pattern for parsing /proc/stat
*/
private static final String PROCFS_STAT = "/proc/stat";
private static final Pattern CPU_TIME_FORMAT =
Pattern.compile("^cpu[ \t]*([0-9]*)" +
"[ \t]*([0-9]*)[ \t]*([0-9]*)[ \t].*");
private String procfsMemFile;
private String procfsCpuFile;
private String procfsStatFile;
long jiffyLengthInMillis;
private long ramSize = 0;
private long swapSize = 0;
private long ramSizeFree = 0; // free ram space on the machine (kB)
private long swapSizeFree = 0; // free swap space on the machine (kB)
private long inactiveSize = 0; // inactive cache memory (kB)
private int numProcessors = 0; // number of processors on the system
private long cpuFrequency = 0L; // CPU frequency on the system (kHz)
private long cumulativeCpuTime = 0L; // CPU used time since system is on (ms)
private long lastCumulativeCpuTime = 0L; // CPU used time read last time (ms)
// Unix timestamp while reading the CPU time (ms)
private float cpuUsage = UNAVAILABLE;
private long sampleTime = UNAVAILABLE;
private long lastSampleTime = UNAVAILABLE;
private ProcfsBasedProcessTree pTree = null;
boolean readMemInfoFile = false;
boolean readCpuInfoFile = false;
/**
* Get current time
* @return Unix time stamp in millisecond
*/
long getCurrentTime() {
return System.currentTimeMillis();
}
public LinuxResourceCalculatorPlugin() {
procfsMemFile = PROCFS_MEMFILE;
procfsCpuFile = PROCFS_CPUINFO;
procfsStatFile = PROCFS_STAT;
jiffyLengthInMillis = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS;
String pid = System.getenv().get("JVM_PID");
pTree = new ProcfsBasedProcessTree(pid);
}
/**
* Constructor which allows assigning the /proc/ directories. This will be
* used only in unit tests
* @param procfsMemFile fake file for /proc/meminfo
* @param procfsCpuFile fake file for /proc/cpuinfo
* @param procfsStatFile fake file for /proc/stat
* @param jiffyLengthInMillis fake jiffy length value
*/
public LinuxResourceCalculatorPlugin(String procfsMemFile,
String procfsCpuFile,
String procfsStatFile,
long jiffyLengthInMillis) {
this.procfsMemFile = procfsMemFile;
this.procfsCpuFile = procfsCpuFile;
this.procfsStatFile = procfsStatFile;
this.jiffyLengthInMillis = jiffyLengthInMillis;
String pid = System.getenv().get("JVM_PID");
pTree = new ProcfsBasedProcessTree(pid);
}
/**
* Read /proc/meminfo, parse and compute memory information only once
*/
private void readProcMemInfoFile() {
readProcMemInfoFile(false);
}
/**
* Read /proc/meminfo, parse and compute memory information
* @param readAgain if false, read only on the first time
*/
private void readProcMemInfoFile(boolean readAgain) {
if (readMemInfoFile && !readAgain) {
return;
}
// Read "/proc/memInfo" file
BufferedReader in = null;
InputStreamReader fReader = null;
try {
fReader = new InputStreamReader(new FileInputStream(procfsMemFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
return;
}
Matcher mat = null;
try {
String str = in.readLine();
while (str != null) {
mat = PROCFS_MEMFILE_FORMAT.matcher(str);
if (mat.find()) {
if (mat.group(1).equals(MEMTOTAL_STRING)) {
ramSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(SWAPTOTAL_STRING)) {
swapSize = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(MEMFREE_STRING)) {
ramSizeFree = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(SWAPFREE_STRING)) {
swapSizeFree = Long.parseLong(mat.group(2));
} else if (mat.group(1).equals(INACTIVE_STRING)) {
inactiveSize = Long.parseLong(mat.group(2));
}
}
str = in.readLine();
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
readMemInfoFile = true;
}
/**
* Read /proc/cpuinfo, parse and calculate CPU information
*/
private void readProcCpuInfoFile() {
// This directory needs to be read only once
if (readCpuInfoFile) {
return;
}
// Read "/proc/cpuinfo" file
BufferedReader in = null;
InputStreamReader fReader = null;
try {
fReader = new InputStreamReader(new FileInputStream(procfsCpuFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
return;
}
Matcher mat = null;
try {
numProcessors = 0;
String str = in.readLine();
while (str != null) {
mat = PROCESSOR_FORMAT.matcher(str);
if (mat.find()) {
numProcessors++;
}
mat = FREQUENCY_FORMAT.matcher(str);
if (mat.find()) {
cpuFrequency = (long)(Double.parseDouble(mat.group(1)) * 1000); // kHz
}
str = in.readLine();
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
readCpuInfoFile = true;
}
/**
* Read /proc/stat file, parse and calculate cumulative CPU
*/
private void readProcStatFile() {
// Read "/proc/stat" file
BufferedReader in = null;
InputStreamReader fReader = null;
try {
fReader = new InputStreamReader(new FileInputStream(procfsStatFile),
Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// shouldn't happen....
return;
}
Matcher mat = null;
try {
String str = in.readLine();
while (str != null) {
mat = CPU_TIME_FORMAT.matcher(str);
if (mat.find()) {
long uTime = Long.parseLong(mat.group(1));
long nTime = Long.parseLong(mat.group(2));
long sTime = Long.parseLong(mat.group(3));
cumulativeCpuTime = uTime + nTime + sTime; // milliseconds
break;
}
str = in.readLine();
}
cumulativeCpuTime *= jiffyLengthInMillis;
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
}
/** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
readProcMemInfoFile();
return ramSize * 1024;
}
/** {@inheritDoc} */
@Override
public long getVirtualMemorySize() {
readProcMemInfoFile();
return (ramSize + swapSize) * 1024;
}
/** {@inheritDoc} */
@Override
public long getAvailablePhysicalMemorySize() {
readProcMemInfoFile(true);
return (ramSizeFree + inactiveSize) * 1024;
}
/** {@inheritDoc} */
@Override
public long getAvailableVirtualMemorySize() {
readProcMemInfoFile(true);
return (ramSizeFree + swapSizeFree + inactiveSize) * 1024;
}
/** {@inheritDoc} */
@Override
public int getNumProcessors() {
readProcCpuInfoFile();
return numProcessors;
}
/** {@inheritDoc} */
@Override
public long getCpuFrequency() {
readProcCpuInfoFile();
return cpuFrequency;
}
/** {@inheritDoc} */
@Override
public long getCumulativeCpuTime() {
readProcStatFile();
return cumulativeCpuTime;
}
/** {@inheritDoc} */
@Override
public float getCpuUsage() {
readProcStatFile();
sampleTime = getCurrentTime();
if (lastSampleTime == UNAVAILABLE ||
lastSampleTime > sampleTime) {
// lastSampleTime > sampleTime may happen when the system time is changed
lastSampleTime = sampleTime;
lastCumulativeCpuTime = cumulativeCpuTime;
return cpuUsage;
}
// When lastSampleTime is sufficiently old, update cpuUsage.
// Also take a sample of the current time and cumulative CPU time for the
// use of the next calculation.
final long MINIMUM_UPDATE_INTERVAL = 10 * jiffyLengthInMillis;
if (sampleTime > lastSampleTime + MINIMUM_UPDATE_INTERVAL) {
cpuUsage = (float)(cumulativeCpuTime - lastCumulativeCpuTime) * 100F /
((float)(sampleTime - lastSampleTime) * getNumProcessors());
lastSampleTime = sampleTime;
lastCumulativeCpuTime = cumulativeCpuTime;
}
return cpuUsage;
}
/**
* Test the {@link LinuxResourceCalculatorPlugin}
*
* @param args
*/
public static void main(String[] args) {
LinuxResourceCalculatorPlugin plugin = new LinuxResourceCalculatorPlugin();
System.out.println("Physical memory Size (bytes) : "
+ plugin.getPhysicalMemorySize());
System.out.println("Total Virtual memory Size (bytes) : "
+ plugin.getVirtualMemorySize());
System.out.println("Available Physical memory Size (bytes) : "
+ plugin.getAvailablePhysicalMemorySize());
System.out.println("Total Available Virtual memory Size (bytes) : "
+ plugin.getAvailableVirtualMemorySize());
System.out.println("Number of Processors : " + plugin.getNumProcessors());
System.out.println("CPU frequency (kHz) : " + plugin.getCpuFrequency());
System.out.println("Cumulative CPU time (ms) : " +
plugin.getCumulativeCpuTime());
try {
// Sleep so we can compute the CPU usage
Thread.sleep(500L);
} catch (InterruptedException e) {
// do nothing
}
System.out.println("CPU usage % : " + plugin.getCpuUsage());
}
@Override
public ProcResourceValues getProcResourceValues() {
pTree.updateProcessTree();
long cpuTime = pTree.getCumulativeCpuTime();
long pMem = pTree.getCumulativeRssmem();
long vMem = pTree.getCumulativeVmem();
return new ProcResourceValues(cpuTime, pMem, vMem);
}
}
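The deleted plugin worked by regex-matching /proc files line by line. For reference, a tiny standalone version of its /proc/meminfo parsing (Linux only; the pattern mirrors PROCFS_MEMFILE_FORMAT above, and only MemTotal is extracted here):

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch: read MemTotal from /proc/meminfo the way the deleted plugin did.
public class MemInfoSketch {
  private static final Pattern MEMINFO_LINE =
      Pattern.compile("^([a-zA-Z]*):[ \t]*([0-9]*)[ \t]kB");

  public static void main(String[] args) throws IOException {
    BufferedReader in = new BufferedReader(new FileReader("/proc/meminfo"));
    try {
      String line;
      while ((line = in.readLine()) != null) {
        Matcher m = MEMINFO_LINE.matcher(line);
        if (m.find() && "MemTotal".equals(m.group(1))) {
          System.out.println("MemTotal = " + Long.parseLong(m.group(2)) + " kB");
          break;
        }
      }
    } finally {
      in.close();
    }
  }
}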
View File
@ -1,743 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.math.BigInteger;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.LinkedList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.util.StringUtils;
import com.google.common.base.Charsets;
/**
* A Proc file-system based ProcessTree. Works only on Linux.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ProcfsBasedProcessTree extends ProcessTree {
static final Log LOG = LogFactory
.getLog(ProcfsBasedProcessTree.class);
private static final String PROCFS = "/proc/";
private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern .compile(
"^([0-9-]+)\\s([^\\s]+)\\s[^\\s]\\s([0-9-]+)\\s([0-9-]+)\\s([0-9-]+)\\s" +
"([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)\\s([0-9-]+\\s){7}([0-9]+)\\s([0-9]+)" +
"(\\s[0-9-]+){15}");
static final String PROCFS_STAT_FILE = "stat";
static final String PROCFS_CMDLINE_FILE = "cmdline";
public static final long PAGE_SIZE;
static {
ShellCommandExecutor shellExecutor =
new ShellCommandExecutor(new String[]{"getconf", "PAGESIZE"});
long pageSize = -1;
try {
shellExecutor.execute();
pageSize = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
} finally {
PAGE_SIZE = pageSize;
}
}
public static final long JIFFY_LENGTH_IN_MILLIS; // in millisecond
static {
ShellCommandExecutor shellExecutor =
new ShellCommandExecutor(new String[]{"getconf", "CLK_TCK"});
long jiffiesPerSecond = -1;
try {
shellExecutor.execute();
jiffiesPerSecond = Long.parseLong(shellExecutor.getOutput().replace("\n", ""));
} catch (IOException e) {
LOG.error(StringUtils.stringifyException(e));
} finally {
JIFFY_LENGTH_IN_MILLIS = jiffiesPerSecond != -1 ?
Math.round(1000D / jiffiesPerSecond) : -1;
}
}
// to enable testing, using this variable which can be configured
// to a test directory.
private String procfsDir;
static private String deadPid = "-1";
private String pid = deadPid;
static private Pattern numberPattern = Pattern.compile("[1-9][0-9]*");
private Long cpuTime = 0L;
private boolean setsidUsed = false;
private long sleeptimeBeforeSigkill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
private Map<String, ProcessInfo> processTree = new HashMap<String, ProcessInfo>();
public ProcfsBasedProcessTree(String pid) {
this(pid, false, DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
}
public ProcfsBasedProcessTree(String pid, boolean setsidUsed,
long sigkillInterval) {
this(pid, setsidUsed, sigkillInterval, PROCFS);
}
/**
* Build a new process tree rooted at the pid.
*
* This method is provided mainly for testing purposes, where
* the root of the proc file system can be adjusted.
*
* @param pid root of the process tree
* @param setsidUsed true, if setsid was used for the root pid
* @param sigkillInterval how long to wait between a SIGTERM and SIGKILL
* when killing a process tree
* @param procfsDir the root of a proc file system - only used for testing.
*/
public ProcfsBasedProcessTree(String pid, boolean setsidUsed,
long sigkillInterval, String procfsDir) {
this.pid = getValidPID(pid);
this.setsidUsed = setsidUsed;
sleeptimeBeforeSigkill = sigkillInterval;
this.procfsDir = procfsDir;
}
/**
* Sets SIGKILL interval
* @deprecated Use {@link ProcfsBasedProcessTree#ProcfsBasedProcessTree(
* String, boolean, long)} instead
* @param interval The time to wait before sending SIGKILL
* after sending SIGTERM
*/
@Deprecated
public void setSigKillInterval(long interval) {
sleeptimeBeforeSigkill = interval;
}
/**
* Checks if the ProcfsBasedProcessTree is available on this system.
*
* @return true if ProcfsBasedProcessTree is available. False otherwise.
*/
public static boolean isAvailable() {
try {
String osName = System.getProperty("os.name");
if (!osName.startsWith("Linux")) {
LOG.info("ProcfsBasedProcessTree currently is supported only on "
+ "Linux.");
return false;
}
} catch (SecurityException se) {
LOG.warn("Failed to get Operating System name. " + se);
return false;
}
return true;
}
/**
* Update the process-tree with latest state. If the root-process is not alive,
* tree will become empty.
*/
public void updateProcessTree() {
if (!pid.equals(deadPid)) {
// Get the list of processes
List<String> processList = getProcessList();
Map<String, ProcessInfo> allProcessInfo = new HashMap<String, ProcessInfo>();
// cache the processTree to get the age for processes
Map<String, ProcessInfo> oldProcs =
new HashMap<String, ProcessInfo>(processTree);
processTree.clear();
ProcessInfo me = null;
for (String proc : processList) {
// Get information for each process
ProcessInfo pInfo = new ProcessInfo(proc);
if (constructProcessInfo(pInfo, procfsDir) != null) {
allProcessInfo.put(proc, pInfo);
if (proc.equals(this.pid)) {
me = pInfo; // cache 'me'
processTree.put(proc, pInfo);
}
}
}
if (me == null) {
return;
}
// Add each process to its parent.
for (Map.Entry<String, ProcessInfo> entry : allProcessInfo.entrySet()) {
String pID = entry.getKey();
if (!pID.equals("1")) {
ProcessInfo pInfo = entry.getValue();
ProcessInfo parentPInfo = allProcessInfo.get(pInfo.getPpid());
if (parentPInfo != null) {
parentPInfo.addChild(pInfo);
}
}
}
// now start constructing the process-tree
LinkedList<ProcessInfo> pInfoQueue = new LinkedList<ProcessInfo>();
pInfoQueue.addAll(me.getChildren());
while (!pInfoQueue.isEmpty()) {
ProcessInfo pInfo = pInfoQueue.remove();
if (!processTree.containsKey(pInfo.getPid())) {
processTree.put(pInfo.getPid(), pInfo);
}
pInfoQueue.addAll(pInfo.getChildren());
}
// update age values and compute the number of jiffies since last update
for (Map.Entry<String, ProcessInfo> procs : processTree.entrySet()) {
ProcessInfo oldInfo = oldProcs.get(procs.getKey());
if (procs.getValue() != null) {
procs.getValue().updateJiffy(oldInfo);
if (oldInfo != null) {
procs.getValue().updateAge(oldInfo);
}
}
}
if (LOG.isDebugEnabled()) {
// Log.debug the ProcfsBasedProcessTree
LOG.debug(this.toString());
}
}
}
/**
* Is the root-process alive?
*
* @return true if the root-process is alive, false otherwise.
*/
public boolean isAlive() {
if (pid.equals(deadPid)) {
return false;
} else {
return isAlive(pid);
}
}
/**
* Is any of the subprocesses in the process-tree alive?
*
* @return true if any of the processes in the process-tree is
* alive, false otherwise.
*/
public boolean isAnyProcessInTreeAlive() {
for (String pId : processTree.keySet()) {
if (isAlive(pId)) {
return true;
}
}
return false;
}
/** Verify that the given process id is the same as its process group id.
* @param pidStr Process id of the to-be-verified-process
* @param procfsDir Procfs root dir
*/
static boolean checkPidPgrpidForMatch(String pidStr, String procfsDir) {
// Get information for this process
ProcessInfo pInfo = new ProcessInfo(pidStr);
pInfo = constructProcessInfo(pInfo, procfsDir);
if (pInfo == null) {
// process group leader may have finished execution, but we still need to
// kill the subProcesses in the process group.
return true;
}
String pgrpId = pInfo.getPgrpId().toString();
//make sure that pId and its pgrpId match
if (!pgrpId.equals(pidStr)) {
LOG.warn("Unexpected: Process with PID " + pidStr +
" is not a process group leader. pgrpId is: " + pInfo.getPgrpId());
return false;
}
if (LOG.isDebugEnabled()) {
LOG.debug(pidStr + " is a process group leader, as expected.");
}
return true;
}
/** Make sure that the given pid is a process group leader and then
* destroy the process group.
* @param pgrpId Process group id of to-be-killed-processes
* @param interval The time to wait before sending SIGKILL
* after sending SIGTERM
* @param inBackground Process is to be killed in the background with
* a separate thread
*/
public static void assertAndDestroyProcessGroup(String pgrpId, long interval,
boolean inBackground)
throws IOException {
// Make sure that the pid given is a process group leader
if (!checkPidPgrpidForMatch(pgrpId, PROCFS)) {
throw new IOException("Process with PID " + pgrpId +
" is not a process group leader.");
}
destroyProcessGroup(pgrpId, interval, inBackground);
}
/**
* Destroy the process-tree.
*/
public void destroy() {
destroy(true);
}
/**
* Destroy the process-tree.
* @param inBackground Process is to be killed in the background with
* a separate thread
*/
public void destroy(boolean inBackground) {
LOG.debug("Killing ProcfsBasedProcessTree of " + pid);
if (pid.equals(deadPid)) {
return;
}
if (isAlive(pid.toString())) {
if (isSetsidAvailable && setsidUsed) {
// In this case, we know that pid got created using setsid. So kill the
// whole processGroup.
try {
assertAndDestroyProcessGroup(pid.toString(), sleeptimeBeforeSigkill,
inBackground);
} catch (IOException e) {
LOG.warn(StringUtils.stringifyException(e));
}
}
else {
//TODO: Destroy all the processes in the subtree in this case also.
// For the time being, killing only the root process.
destroyProcess(pid.toString(), sleeptimeBeforeSigkill, inBackground);
}
}
}
private static final String PROCESSTREE_DUMP_FORMAT =
"\t|- %s %s %d %d %s %d %d %d %d %s%n";
/**
* Get a dump of the process-tree.
*
* @return a string concatenating the dump of information of all the processes
* in the process-tree
*/
public String getProcessTreeDump() {
StringBuilder ret = new StringBuilder();
// The header.
ret.append(String.format("\t|- PID PPID PGRPID SESSID CMD_NAME "
+ "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) "
+ "RSSMEM_USAGE(PAGES) FULL_CMD_LINE%n"));
for (ProcessInfo p : processTree.values()) {
if (p != null) {
ret.append(String.format(PROCESSTREE_DUMP_FORMAT, p.getPid(), p
.getPpid(), p.getPgrpId(), p.getSessionId(), p.getName(), p
.getUtime(), p.getStime(), p.getVmem(), p.getRssmemPage(), p
.getCmdLine(procfsDir)));
}
}
return ret.toString();
}
/**
* Get the cumulative virtual memory used by all the processes in the
* process-tree.
*
* @return cumulative virtual memory used by the process-tree in bytes.
*/
public long getCumulativeVmem() {
// include all processes.. all processes will be older than 0.
return getCumulativeVmem(0);
}
/**
* Get the cumulative resident set size (rss) memory used by all the processes
* in the process-tree.
*
* @return cumulative rss memory used by the process-tree in bytes. return 0
* if it cannot be calculated
*/
public long getCumulativeRssmem() {
// include all processes.. all processes will be older than 0.
return getCumulativeRssmem(0);
}
/**
* Get the cumulative virtual memory used by all the processes in the
* process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return cumulative virtual memory used by the process-tree in bytes,
* for processes older than this age.
*/
public long getCumulativeVmem(int olderThanAge) {
long total = 0;
for (ProcessInfo p : processTree.values()) {
if ((p != null) && (p.getAge() > olderThanAge)) {
total += p.getVmem();
}
}
return total;
}
/**
* Get the cumulative resident set size (rss) memory used by all the processes
* in the process-tree that are older than the passed in age.
*
* @param olderThanAge processes above this age are included in the
* memory addition
* @return cumulative rss memory used by the process-tree in bytes,
* for processes older than this age. return 0 if it cannot be
* calculated
*/
public long getCumulativeRssmem(int olderThanAge) {
if (PAGE_SIZE < 0) {
return 0;
}
long totalPages = 0;
for (ProcessInfo p : processTree.values()) {
if ((p != null) && (p.getAge() > olderThanAge)) {
totalPages += p.getRssmemPage();
}
}
return totalPages * PAGE_SIZE; // convert # pages to byte
}
/**
* Get the CPU time in milliseconds used by all the processes in the
* process-tree since the process-tree was created.
*
* @return cumulative CPU time in milliseconds since the process-tree was
*         created; returns 0 if it cannot be calculated
*/
public long getCumulativeCpuTime() {
if (JIFFY_LENGTH_IN_MILLIS < 0) {
return 0;
}
long incJiffies = 0;
for (ProcessInfo p : processTree.values()) {
if (p != null) {
incJiffies += p.dtime;
}
}
cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS;
return cpuTime;
}
private static String getValidPID(String pid) {
if (pid == null) return deadPid;
Matcher m = numberPattern.matcher(pid);
if (m.matches()) return pid;
return deadPid;
}
/**
* Get the list of all processes in the system.
*/
private List<String> getProcessList() {
String[] processDirs = (new File(procfsDir)).list();
List<String> processList = new ArrayList<String>();
for (String dir : processDirs) {
Matcher m = numberPattern.matcher(dir);
if (!m.matches()) continue;
try {
if ((new File(procfsDir, dir)).isDirectory()) {
processList.add(dir);
}
} catch (SecurityException s) {
// skip this process
}
}
return processList;
}
/**
* Construct the ProcessInfo using the process' PID and a procfs rooted at the
* specified directory, and return it. The procfsDir parameter is provided
* mainly to assist testing.
*
* Returns null on failing to read from procfs.
*
* @param pinfo ProcessInfo that needs to be updated
* @param procfsDir root of the proc file system
* @return updated ProcessInfo, null on errors.
*/
private static ProcessInfo constructProcessInfo(ProcessInfo pinfo,
String procfsDir) {
ProcessInfo ret = null;
// Read "procfsDir/<pid>/stat" file - typically /proc/<pid>/stat
BufferedReader in = null;
InputStreamReader fReader = null;
try {
File pidDir = new File(procfsDir, pinfo.getPid());
fReader = new InputStreamReader(new FileInputStream(
new File(pidDir, PROCFS_STAT_FILE)), Charsets.UTF_8);
in = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
// The process vanished in the interim!
LOG.info("The process " + pinfo.getPid()
+ " may have finished in the interim.");
return ret;
}
ret = pinfo;
try {
String str = in.readLine(); // only one line
Matcher m = PROCFS_STAT_FILE_FORMAT.matcher(str);
boolean mat = m.find();
if (mat) {
// Set (name) (ppid) (pgrpId) (session) (utime) (stime) (vsize) (rss)
pinfo.updateProcessInfo(m.group(2), m.group(3),
Integer.parseInt(m.group(4)), Integer.parseInt(m.group(5)),
Long.parseLong(m.group(7)), new BigInteger(m.group(8)),
Long.parseLong(m.group(10)), Long.parseLong(m.group(11)));
} else {
LOG.warn("Unexpected: procfs stat file is not in the expected format"
+ " for process with pid " + pinfo.getPid());
ret = null;
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
ret = null;
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
return ret;
}
/**
* Returns a string listing the PIDs of the processes present in the
* ProcfsBasedProcessTree. Output format: [pid pid ..]
*/
public String toString() {
StringBuffer pTree = new StringBuffer("[ ");
for (String p : processTree.keySet()) {
pTree.append(p);
pTree.append(" ");
}
return pTree.substring(0, pTree.length()) + "]";
}
/**
*
* Class containing information of a process.
*
*/
private static class ProcessInfo {
private String pid; // process-id
private String name; // command name
private Integer pgrpId; // process group-id
private String ppid; // parent process-id
private Integer sessionId; // session-id
private Long vmem; // virtual memory usage
private Long rssmemPage; // rss memory usage in # of pages
private Long utime = 0L; // # of jiffies in user mode
private final BigInteger MAX_LONG = BigInteger.valueOf(Long.MAX_VALUE);
private BigInteger stime = new BigInteger("0"); // # of jiffies in kernel mode
// how many times has this process been seen alive
private int age;
// # of jiffies used since last update:
private Long dtime = 0L;
// dtime = (utime + stime) - (utimeOld + stimeOld)
// We need this to compute the cumulative CPU time
// because the subprocess may finish earlier than root process
private List<ProcessInfo> children = new ArrayList<ProcessInfo>(); // list of children
public ProcessInfo(String pid) {
this.pid = pid;
// seeing this the first time.
this.age = 1;
}
public String getPid() {
return pid;
}
public String getName() {
return name;
}
public Integer getPgrpId() {
return pgrpId;
}
public String getPpid() {
return ppid;
}
public Integer getSessionId() {
return sessionId;
}
public Long getVmem() {
return vmem;
}
public Long getUtime() {
return utime;
}
public BigInteger getStime() {
return stime;
}
public Long getDtime() {
return dtime;
}
public Long getRssmemPage() { // get rss # of pages
return rssmemPage;
}
public int getAge() {
return age;
}
public boolean isParent(ProcessInfo p) {
if (pid.equals(p.getPpid())) {
return true;
}
return false;
}
public void updateProcessInfo(String name, String ppid, Integer pgrpId,
Integer sessionId, Long utime, BigInteger stime, Long vmem, Long rssmem) {
this.name = name;
this.ppid = ppid;
this.pgrpId = pgrpId;
this.sessionId = sessionId;
this.utime = utime;
this.stime = stime;
this.vmem = vmem;
this.rssmemPage = rssmem;
}
public void updateJiffy(ProcessInfo oldInfo) {
if (oldInfo == null) {
BigInteger sum = this.stime.add(BigInteger.valueOf(this.utime));
if (sum.compareTo(MAX_LONG) > 0) {
this.dtime = 0L;
LOG.warn("Sum of stime (" + this.stime + ") and utime (" + this.utime
+ ") is greater than " + Long.MAX_VALUE);
} else {
this.dtime = sum.longValue();
}
return;
}
this.dtime = (this.utime - oldInfo.utime +
this.stime.subtract(oldInfo.stime).longValue());
}
public void updateAge(ProcessInfo oldInfo) {
this.age = oldInfo.age + 1;
}
public boolean addChild(ProcessInfo p) {
return children.add(p);
}
public List<ProcessInfo> getChildren() {
return children;
}
public String getCmdLine(String procfsDir) {
String ret = "N/A";
if (pid == null) {
return ret;
}
BufferedReader in = null;
InputStreamReader fReader = null;
try {
fReader = new InputStreamReader(new FileInputStream(
new File(new File(procfsDir, pid), PROCFS_CMDLINE_FILE)),
Charsets.UTF_8);
} catch (FileNotFoundException f) {
// The process vanished in the interim!
return ret;
}
in = new BufferedReader(fReader);
try {
ret = in.readLine(); // only one line
if (ret == null) {
ret = "N/A";
} else {
ret = ret.replace('\0', ' '); // Replace each null char with a space
if (ret.equals("")) {
// The cmdline might be empty because the process is swapped out or
// is a zombie.
ret = "N/A";
}
}
} catch (IOException io) {
LOG.warn("Error reading the stream " + io);
ret = "N/A";
} finally {
// Close the streams
try {
fReader.close();
try {
in.close();
} catch (IOException i) {
LOG.warn("Error closing the stream " + in);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
return ret;
}
}
}
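For orientation, here is a minimal usage sketch of the class above; it is not part of this change. The pid "12345", the 5000 ms SIGTERM-to-SIGKILL interval and the "/proc" root are placeholder values, and the import is omitted because copies of this class live under more than one package in the tree.

// Hypothetical monitoring snippet; the values are illustrative only.
ProcfsBasedProcessTree tree =
    new ProcfsBasedProcessTree("12345", false, 5000L, "/proc");
if (ProcfsBasedProcessTree.isAvailable()) {
  tree.updateProcessTree();                      // snapshot /proc and rebuild the tree
  long vmemBytes = tree.getCumulativeVmem();     // virtual memory of the whole tree, in bytes
  long rssBytes  = tree.getCumulativeRssmem(1);  // RSS of processes seen in at least two updates
  long cpuMillis = tree.getCumulativeCpuTime();  // CPU time accumulated since the tree was created
}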

View File

@ -1,165 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Plugin to calculate resource information on the system.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class ResourceCalculatorPlugin extends Configured {
/**
* Obtain the total size of the virtual memory present in the system.
*
* @return virtual memory size in bytes.
*/
public abstract long getVirtualMemorySize();
/**
* Obtain the total size of the physical memory present in the system.
*
* @return physical memory size in bytes.
*/
public abstract long getPhysicalMemorySize();
/**
* Obtain the total size of the available virtual memory present
* in the system.
*
* @return available virtual memory size in bytes.
*/
public abstract long getAvailableVirtualMemorySize();
/**
* Obtain the total size of the available physical memory present
* in the system.
*
* @return available physical memory size in bytes.
*/
public abstract long getAvailablePhysicalMemorySize();
/**
* Obtain the total number of processors present on the system.
*
* @return number of processors
*/
public abstract int getNumProcessors();
/**
* Obtain the CPU frequency of the system.
*
* @return CPU frequency in kHz
*/
public abstract long getCpuFrequency();
/**
* Obtain the cumulative CPU time since the system was started.
*
* @return cumulative CPU time in milliseconds
*/
public abstract long getCumulativeCpuTime();
/**
* Obtain the CPU usage % of the machine. Return -1 if it is unavailable
*
* @return CPU usage in %
*/
public abstract float getCpuUsage();
/**
* Obtain resource status used by current process tree.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract ProcResourceValues getProcResourceValues();
public static class ProcResourceValues {
private final long cumulativeCpuTime;
private final long physicalMemorySize;
private final long virtualMemorySize;
public ProcResourceValues(long cumulativeCpuTime, long physicalMemorySize,
long virtualMemorySize) {
this.cumulativeCpuTime = cumulativeCpuTime;
this.physicalMemorySize = physicalMemorySize;
this.virtualMemorySize = virtualMemorySize;
}
/**
* Obtain the physical memory size used by the current process tree.
* @return physical memory size in bytes.
*/
public long getPhysicalMemorySize() {
return physicalMemorySize;
}
/**
* Obtain the virtual memory size used by the current process tree.
* @return virtual memory size in bytes.
*/
public long getVirtualMemorySize() {
return virtualMemorySize;
}
/**
* Obtain the cumulative CPU time used by the current process tree.
* @return cumulative CPU time in milliseconds
*/
public long getCumulativeCpuTime() {
return cumulativeCpuTime;
}
}
/**
* Get the ResourceCalculatorPlugin from the class name and configure it. If
* the class name is null, this method will try to return a resource
* calculator plugin suitable for this system.
*
* @param clazz class-name
* @param conf configure the plugin with this.
* @return ResourceCalculatorPlugin
*/
public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
if (clazz != null) {
return ReflectionUtils.newInstance(clazz, conf);
}
// No class given, try an OS-specific class
try {
String osName = System.getProperty("os.name");
if (osName.startsWith("Linux")) {
return new LinuxResourceCalculatorPlugin();
}
} catch (SecurityException se) {
// Failed to get Operating System name.
return null;
}
// Not supported on this system.
return null;
}
}
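Although this change deletes the mapreduce.util copy of the plugin, a rough sketch of how callers obtain and query it may help when reading the removal; this is illustrative only, and Configuration is the org.apache.hadoop.conf.Configuration imported above.

// Passing null lets the factory fall back to LinuxResourceCalculatorPlugin on Linux;
// on unsupported platforms it returns null, so callers must check for that.
Configuration conf = new Configuration();
ResourceCalculatorPlugin plugin =
    ResourceCalculatorPlugin.getResourceCalculatorPlugin(null, conf);
if (plugin != null) {
  long physicalBytes = plugin.getPhysicalMemorySize();  // bytes
  long cpuMillis     = plugin.getCumulativeCpuTime();   // milliseconds
  float cpuPercent   = plugin.getCpuUsage();            // percent; -1 if unavailable
}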

View File

@ -806,6 +806,14 @@
</description> </description>
</property> </property>
<property>
<name>mapreduce.am.max-attempts</name>
<value>1</value>
<description>The maximum number of application attempts. It is an
application-specific setting. It should not be larger than the global number
set by the resourcemanager; otherwise, it will be overridden.</description>
</property>
<!-- Job Notification Configuration --> <!-- Job Notification Configuration -->
<property> <property>
<name>mapreduce.job.end-notification.url</name> <name>mapreduce.job.end-notification.url</name>
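A hedged client-side sketch of the new property follows; MRJobConfig.MR_AM_MAX_ATTEMPTS is the constant YARNRunner reads later in this commit and corresponds to mapreduce.am.max-attempts, while the value 1 and the variable name are illustrative only.

// Illustrative only: caps this job's MRAppMaster attempts below the
// global maximum configured on the resourcemanager.
Configuration jobConf = new Configuration();
jobConf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 1);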

View File

@ -1,3 +1,4 @@
/** /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
@ -15,15 +16,23 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.yarn.server.api.records; package org.apache.hadoop.mapred;
public interface RegistrationResponse { import org.junit.Test;
MasterKey getMasterKey(); import static org.junit.Assert.*;
/**
* test Clock class
*
*/
public class TestClock {
void setMasterKey(MasterKey secretKey); @Test (timeout=1000)
public void testClock(){
Clock clock= new Clock();
long templateTime=System.currentTimeMillis();
long time=clock.getTime();
assertEquals(templateTime, time,30);
NodeAction getNodeAction(); }
void setNodeAction(NodeAction nodeAction);
} }

View File

@ -0,0 +1,155 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.util.regex.Pattern;
import static org.junit.Assert.*;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
/**
* test JobConf
*
*/
public class TestJobConf {
/**
* test getters and setters of JobConf
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobConf() {
JobConf conf = new JobConf();
// test default value
Pattern pattern = conf.getJarUnpackPattern();
assertEquals(Pattern.compile("(?:classes/|lib/).*").toString(),
pattern.toString());
// default value
assertFalse(conf.getKeepFailedTaskFiles());
conf.setKeepFailedTaskFiles(true);
assertTrue(conf.getKeepFailedTaskFiles());
// default value
assertNull(conf.getKeepTaskFilesPattern());
conf.setKeepTaskFilesPattern("123454");
assertEquals("123454", conf.getKeepTaskFilesPattern());
// default value
assertNotNull(conf.getWorkingDirectory());
conf.setWorkingDirectory(new Path("test"));
assertTrue(conf.getWorkingDirectory().toString().endsWith("test"));
// default value
assertEquals(1, conf.getNumTasksToExecutePerJvm());
// default value
assertNull(conf.getKeyFieldComparatorOption());
conf.setKeyFieldComparatorOptions("keySpec");
assertEquals("keySpec", conf.getKeyFieldComparatorOption());
// default value
assertFalse(conf.getUseNewReducer());
conf.setUseNewReducer(true);
assertTrue(conf.getUseNewReducer());
// default
assertTrue(conf.getMapSpeculativeExecution());
assertTrue(conf.getReduceSpeculativeExecution());
assertTrue(conf.getSpeculativeExecution());
conf.setReduceSpeculativeExecution(false);
assertTrue(conf.getSpeculativeExecution());
conf.setMapSpeculativeExecution(false);
assertFalse(conf.getSpeculativeExecution());
assertFalse(conf.getMapSpeculativeExecution());
assertFalse(conf.getReduceSpeculativeExecution());
conf.setSessionId("ses");
assertEquals("ses", conf.getSessionId());
assertEquals(3, conf.getMaxTaskFailuresPerTracker());
conf.setMaxTaskFailuresPerTracker(2);
assertEquals(2, conf.getMaxTaskFailuresPerTracker());
assertEquals(0, conf.getMaxMapTaskFailuresPercent());
conf.setMaxMapTaskFailuresPercent(50);
assertEquals(50, conf.getMaxMapTaskFailuresPercent());
assertEquals(0, conf.getMaxReduceTaskFailuresPercent());
conf.setMaxReduceTaskFailuresPercent(70);
assertEquals(70, conf.getMaxReduceTaskFailuresPercent());
// by default
assertEquals(JobPriority.NORMAL.name(), conf.getJobPriority().name());
conf.setJobPriority(JobPriority.HIGH);
assertEquals(JobPriority.HIGH.name(), conf.getJobPriority().name());
assertNull(conf.getJobSubmitHostName());
conf.setJobSubmitHostName("hostname");
assertEquals("hostname", conf.getJobSubmitHostName());
// default
assertNull(conf.getJobSubmitHostAddress());
conf.setJobSubmitHostAddress("ww");
assertEquals("ww", conf.getJobSubmitHostAddress());
// default value
assertFalse(conf.getProfileEnabled());
conf.setProfileEnabled(true);
assertTrue(conf.getProfileEnabled());
// default value
assertEquals(conf.getProfileTaskRange(true).toString(), "0-2");
assertEquals(conf.getProfileTaskRange(false).toString(), "0-2");
conf.setProfileTaskRange(true, "0-3");
assertEquals(conf.getProfileTaskRange(false).toString(), "0-2");
assertEquals(conf.getProfileTaskRange(true).toString(), "0-3");
// default value
assertNull(conf.getMapDebugScript());
conf.setMapDebugScript("mDbgScript");
assertEquals("mDbgScript", conf.getMapDebugScript());
// default value
assertNull(conf.getReduceDebugScript());
conf.setReduceDebugScript("rDbgScript");
assertEquals("rDbgScript", conf.getReduceDebugScript());
// default value
assertNull(conf.getJobLocalDir());
assertEquals("default", conf.getQueueName());
conf.setQueueName("qname");
assertEquals("qname", conf.getQueueName());
assertEquals(1, conf.computeNumSlotsPerMap(100L));
assertEquals(1, conf.computeNumSlotsPerReduce(100L));
conf.setMemoryForMapTask(100 * 1000);
assertEquals(1000, conf.computeNumSlotsPerMap(100L));
conf.setMemoryForReduceTask(1000 * 1000);
assertEquals(1000, conf.computeNumSlotsPerReduce(1000L));
assertEquals(-1, conf.getMaxPhysicalMemoryForTask());
assertEquals("The variable key is no longer used.",
JobConf.deprecatedString("key"));
}
}

View File

@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* test class JobInfo
*
*
*/
public class TestJobInfo {
@Test (timeout=5000)
public void testJobInfo() throws IOException {
JobID jid = new JobID("001", 1);
Text user = new Text("User");
Path path = new Path("/tmp/test");
JobInfo info = new JobInfo(jid, user, path);
ByteArrayOutputStream out = new ByteArrayOutputStream();
info.write(new DataOutputStream(out));
JobInfo copyinfo = new JobInfo();
copyinfo.readFields(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
assertEquals(info.getJobID().toString(), copyinfo.getJobID().toString());
assertEquals(info.getJobSubmitDir().getName(), copyinfo.getJobSubmitDir()
.getName());
assertEquals(info.getUser().toString(), copyinfo.getUser().toString());
}
}

View File

@ -0,0 +1,168 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.mapred.TaskCompletionEvent.Status;
import org.apache.hadoop.mapreduce.TaskType;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* Test deprecated methods
*
*/
public class TestOldMethodsJobID {
/**
* test deprecated methods of TaskID
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testDepricatedMethods() throws IOException {
JobID jid = new JobID();
TaskID test = new TaskID(jid, true, 1);
assertEquals(test.getTaskType(), TaskType.MAP);
test = new TaskID(jid, false, 1);
assertEquals(test.getTaskType(), TaskType.REDUCE);
test = new TaskID("001", 1, false, 1);
assertEquals(test.getTaskType(), TaskType.REDUCE);
test = new TaskID("001", 1, true, 1);
assertEquals(test.getTaskType(), TaskType.MAP);
ByteArrayOutputStream out = new ByteArrayOutputStream();
test.write(new DataOutputStream(out));
TaskID ti = TaskID.read(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
assertEquals(ti.toString(), test.toString());
assertEquals("task_001_0001_m_000002",
TaskID.getTaskIDsPattern("001", 1, true, 2));
assertEquals("task_003_0001_m_000004",
TaskID.getTaskIDsPattern("003", 1, TaskType.MAP, 4));
assertEquals("003_0001_m_000004",
TaskID.getTaskIDsPatternWOPrefix("003", 1, TaskType.MAP, 4).toString());
}
/**
* test JobID
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobID() throws IOException{
JobID jid = new JobID("001",2);
ByteArrayOutputStream out = new ByteArrayOutputStream();
jid.write(new DataOutputStream(out));
assertEquals(jid,JobID.read(new DataInputStream(new ByteArrayInputStream(out.toByteArray()))));
assertEquals("job_001_0001",JobID.getJobIDsPattern("001",1));
}
/**
* test deprecated methods of TaskCompletionEvent
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskCompletionEvent() {
TaskAttemptID taid = new TaskAttemptID("001", 1, TaskType.REDUCE, 2, 3);
TaskCompletionEvent template = new TaskCompletionEvent(12, taid, 13, true,
Status.SUCCEEDED, "httptracker");
TaskCompletionEvent testEl = TaskCompletionEvent.downgrade(template);
testEl.setTaskAttemptId(taid);
testEl.setTaskTrackerHttp("httpTracker");
testEl.setTaskId("attempt_001_0001_m_000002_04");
assertEquals("attempt_001_0001_m_000002_4",testEl.getTaskId());
testEl.setTaskStatus(Status.OBSOLETE);
assertEquals(Status.OBSOLETE.toString(), testEl.getStatus().toString());
testEl.setTaskRunTime(20);
assertEquals(testEl.getTaskRunTime(), 20);
testEl.setEventId(16);
assertEquals(testEl.getEventId(), 16);
}
/**
* test deprecated methods of JobProfile
* @throws IOException
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testJobProfile() throws IOException {
JobProfile profile = new JobProfile("user", "job_001_03", "jobFile", "uri",
"name");
assertEquals("job_001_0003", profile.getJobId());
assertEquals("default", profile.getQueueName());
// serialization test
ByteArrayOutputStream out = new ByteArrayOutputStream();
profile.write(new DataOutputStream(out));
JobProfile profile2 = new JobProfile();
profile2.readFields(new DataInputStream(new ByteArrayInputStream(out
.toByteArray())));
assertEquals(profile2.name, profile.name);
assertEquals(profile2.jobFile, profile.jobFile);
assertEquals(profile2.queueName, profile.queueName);
assertEquals(profile2.url, profile.url);
assertEquals(profile2.user, profile.user);
}
/**
* test TaskAttemptID
*/
@SuppressWarnings( "deprecation" )
@Test (timeout=5000)
public void testTaskAttemptID (){
TaskAttemptID task = new TaskAttemptID("001",2,true,3,4);
assertEquals("attempt_001_0002_m_000003_4", TaskAttemptID.getTaskAttemptIDsPattern("001", 2, true, 3, 4));
assertEquals("task_001_0002_m_000003", task.getTaskID().toString());
assertEquals("attempt_001_0001_r_000002_3",TaskAttemptID.getTaskAttemptIDsPattern("001", 1, TaskType.REDUCE, 2, 3));
assertEquals("001_0001_m_000001_2", TaskAttemptID.getTaskAttemptIDsPatternWOPrefix("001",1, TaskType.MAP, 1, 2).toString());
}
/**
* test Reporter.NULL
*
*/
@Test (timeout=5000)
public void testReporter(){
Reporter nullReporter=Reporter.NULL;
assertNull(nullReporter.getCounter(null));
assertNull(nullReporter.getCounter("group", "name"));
// getInputSplit method removed
try{
assertNull(nullReporter.getInputSplit());
}catch(UnsupportedOperationException e){
assertEquals( "NULL reporter has no input",e.getMessage());
}
assertEquals(0,nullReporter.getProgress(),0.01);
}
}

View File

@ -0,0 +1,238 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test;
import static junit.framework.Assert.*;
import static org.mockito.Mockito.*;
/**
* TestQueue checks the sanity and recoverability of Queue
*/
public class TestQueue {
/**
* test QueueManager
* configuration from file
*
* @throws IOException
*/
@Test (timeout=5000)
public void testQueue() throws IOException {
File f = null;
try {
f = writeFile();
QueueManager manager = new QueueManager(f.getCanonicalPath(), true);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");
Queue root = manager.getRoot();
assertTrue(root.getChildren().size() == 2);
Iterator<Queue> iterator = root.getChildren().iterator();
Queue firstSubQueue = iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(
firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
.toString(),
"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue = iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(secondSubQueue.getProperties().getProperty("key"), "value");
assertEquals(secondSubQueue.getProperties().getProperty("key1"), "value1");
// test status
assertEquals(firstSubQueue.getState().getStateName(), "running");
assertEquals(secondSubQueue.getState().getStateName(), "stopped");
Set<String> template = new HashSet<String>();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(), template);
// test user access
UserGroupInformation mockUGI = mock(UserGroupInformation.class);
when(mockUGI.getShortUserName()).thenReturn("user1");
String[] groups = { "group1" };
when(mockUGI.getGroupNames()).thenReturn(groups);
assertTrue(manager.hasAccess("first", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("second", QueueACL.SUBMIT_JOB, mockUGI));
assertFalse(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
when(mockUGI.getShortUserName()).thenReturn("user3");
assertTrue(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
QueueAclsInfo[] qai = manager.getQueueAcls(mockUGI);
assertEquals(qai.length, 1);
// test refresh queue
manager.refreshQueues(getConfiguration(), null);
iterator = root.getChildren().iterator();
Queue firstSubQueue1 = iterator.next();
Queue secondSubQueue1 = iterator.next();
// test the equals method
assertTrue(firstSubQueue.equals(firstSubQueue1));
assertEquals(firstSubQueue1.getState().getStateName(), "running");
assertEquals(secondSubQueue1.getState().getStateName(), "stopped");
assertEquals(firstSubQueue1.getSchedulingInfo(), "queueInfo");
assertEquals(secondSubQueue1.getSchedulingInfo(), "queueInfoqueueInfo");
// test JobQueueInfo
assertEquals(firstSubQueue.getJobQueueInfo().getQueueName(), "first");
assertEquals(firstSubQueue.getJobQueueInfo().getQueueState(), "running");
assertEquals(firstSubQueue.getJobQueueInfo().getSchedulingInfo(),
"queueInfo");
assertEquals(secondSubQueue.getJobQueueInfo().getChildren().size(), 0);
// test
assertEquals(manager.getSchedulerInfo("first"), "queueInfo");
assertEquals(manager.getJobQueueInfos()[0].getQueueName(), secondSubQueue
.getJobQueueInfo().getQueueName());
assertEquals(manager.getJobQueueInfos()[1].getQueueName(), firstSubQueue
.getJobQueueInfo().getQueueName());
// test getJobQueueInfoMapping
assertEquals(
manager.getJobQueueInfoMapping().get("first").getQueueName(), "first");
// test dumpConfiguration
Writer writer = new StringWriter();
Configuration conf = getConfiguration();
conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
QueueManager.dumpConfiguration(writer, f.getAbsolutePath(), conf);
String result = writer.toString();
assertTrue(result
.indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);
writer = new StringWriter();
QueueManager.dumpConfiguration(writer, conf);
result = writer.toString();
assertEquals(
"{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[{\"key\":\"capacity\",\"value\":\"20\"},{\"key\":\"user-limit\",\"value\":\"30\"}],\"children\":[]}]}]}",
result);
// test constructor QueueAclsInfo
QueueAclsInfo qi = new QueueAclsInfo();
assertNull(qi.getQueueName());
} finally {
if (f != null) {
f.delete();
}
}
}
private Configuration getConfiguration() {
Configuration conf = new Configuration();
conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY,
"first,second");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX
+ "first.acl-submit-job", "user1,user2 group1,group2");
conf.set(MRConfig.MR_ACLS_ENABLED, "true");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX + "first.state",
"running");
conf.set(QueueManager.QUEUE_CONF_PROPERTY_NAME_PREFIX + "second.state",
"stopped");
return conf;
}
@Test (timeout=5000)
public void testDefaultConfig() {
QueueManager manager = new QueueManager(true);
assertEquals(manager.getRoot().getChildren().size(), 2);
}
/**
* test for QueueManager built from a Configuration (no queue configuration file)
*
* @throws IOException
*/
@Test (timeout=5000)
public void test2Queue() throws IOException {
Configuration conf = getConfiguration();
QueueManager manager = new QueueManager(conf);
manager.setSchedulerInfo("first", "queueInfo");
manager.setSchedulerInfo("second", "queueInfoqueueInfo");
Queue root = manager.getRoot();
// test children queues
assertTrue(root.getChildren().size() == 2);
Iterator<Queue> iterator = root.getChildren().iterator();
Queue firstSubQueue = iterator.next();
assertTrue(firstSubQueue.getName().equals("first"));
assertEquals(
firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
.toString(),
"Users [user1, user2] and members of the groups [group1, group2] are allowed");
Queue secondSubQueue = iterator.next();
assertTrue(secondSubQueue.getName().equals("second"));
assertEquals(firstSubQueue.getState().getStateName(), "running");
assertEquals(secondSubQueue.getState().getStateName(), "stopped");
assertTrue(manager.isRunning("first"));
assertFalse(manager.isRunning("second"));
assertEquals(firstSubQueue.getSchedulingInfo(), "queueInfo");
assertEquals(secondSubQueue.getSchedulingInfo(), "queueInfoqueueInfo");
// test leaf queue
Set<String> template = new HashSet<String>();
template.add("first");
template.add("second");
assertEquals(manager.getLeafQueueNames(), template);
}
/**
* write the queue configuration to a temporary file
* @return the temporary configuration file
* @throws IOException
*/
private File writeFile() throws IOException {
File f = null;
f = File.createTempFile("tst", "xml");
BufferedWriter out = new BufferedWriter(new FileWriter(f));
String properties = "<properties><property key=\"key\" value=\"value\"/><property key=\"key1\" value=\"value1\"/></properties>";
out.write("<queues>");
out.newLine();
out.write("<queue><name>first</name><acl-submit-job>user1,user2 group1,group2</acl-submit-job><acl-administer-jobs>user3,user4 group3,group4</acl-administer-jobs><state>running</state></queue>");
out.newLine();
out.write("<queue><name>second</name><acl-submit-job>u1,u2 g1,g2</acl-submit-job>"
+ properties + "<state>stopped</state></queue>");
out.newLine();
out.write("</queues>");
out.flush();
out.close();
return f;
}
}

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import static org.junit.Assert.*;
/**
* test SkipBadRecords
*
*
*/
public class TestSkipBadRecords {
@Test (timeout=5000)
public void testSkipBadRecords() {
// test default values
Configuration conf = new Configuration();
assertEquals(2, SkipBadRecords.getAttemptsToStartSkipping(conf));
assertTrue(SkipBadRecords.getAutoIncrMapperProcCount(conf));
assertTrue(SkipBadRecords.getAutoIncrReducerProcCount(conf));
assertEquals(0, SkipBadRecords.getMapperMaxSkipRecords(conf));
assertEquals(0, SkipBadRecords.getReducerMaxSkipGroups(conf), 0);
assertNull(SkipBadRecords.getSkipOutputPath(conf));
// test setters
SkipBadRecords.setAttemptsToStartSkipping(conf, 5);
SkipBadRecords.setAutoIncrMapperProcCount(conf, false);
SkipBadRecords.setAutoIncrReducerProcCount(conf, false);
SkipBadRecords.setMapperMaxSkipRecords(conf, 6L);
SkipBadRecords.setReducerMaxSkipGroups(conf, 7L);
JobConf jc= new JobConf();
SkipBadRecords.setSkipOutputPath(jc, new Path("test"));
// test getters
assertEquals(5, SkipBadRecords.getAttemptsToStartSkipping(conf));
assertFalse(SkipBadRecords.getAutoIncrMapperProcCount(conf));
assertFalse(SkipBadRecords.getAutoIncrReducerProcCount(conf));
assertEquals(6L, SkipBadRecords.getMapperMaxSkipRecords(conf));
assertEquals(7L, SkipBadRecords.getReducerMaxSkipGroups(conf), 0);
assertEquals("test",SkipBadRecords.getSkipOutputPath(jc).toString());
}
}

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.mapred.TaskLog.LogName;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.junit.Test;
import static junit.framework.Assert.*;
import static org.mockito.Mockito.*;
/**
* Tests for the TaskLog class
*/
public class TestTaskLog {
/**
* test TaskLog
*
* @throws IOException
*/
@Test (timeout=50000)
public void testTaskLog() throws IOException {
// test TaskLog
System.setProperty(MRJobConfig.TASK_LOG_DIR, "testString");
assertEquals(TaskLog.getMRv2LogDir(), "testString");
TaskAttemptID taid = mock(TaskAttemptID.class);
JobID jid = new JobID("job", 1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("testString/stdout"));
// test getRealTaskLogFileLocation
File indexFile = TaskLog.getIndexFile(taid, true);
if (!indexFile.getParentFile().exists()) {
indexFile.getParentFile().mkdirs();
}
indexFile.delete();
indexFile.createNewFile();
TaskLog.syncLogs("location", taid, true);
assertTrue(indexFile.getAbsolutePath().endsWith(
"userlogs/job_job_0001/JobId.cleanup/log.index"));
f = TaskLog.getRealTaskLogFileLocation(taid, true, LogName.DEBUGOUT);
if (f != null) {
assertTrue(f.getAbsolutePath().endsWith("location/debugout"));
FileUtils.copyFile(indexFile, f);
}
// test obtainLogDirOwner
assertTrue(TaskLog.obtainLogDirOwner(taid).length() > 0);
// test TaskLog.Reader
assertTrue(readTaskLog(TaskLog.LogName.DEBUGOUT, taid, true).length() > 0);
}
public String readTaskLog(TaskLog.LogName filter,
org.apache.hadoop.mapred.TaskAttemptID taskId, boolean isCleanup)
throws IOException {
// string buffer to store task log
StringBuffer result = new StringBuffer();
int res;
// reads the whole tasklog into inputstream
InputStream taskLogReader = new TaskLog.Reader(taskId, filter, 0, -1,
isCleanup);
// construct string log from inputstream.
byte[] b = new byte[65536];
while (true) {
res = taskLogReader.read(b);
if (res > 0) {
result.append(new String(b));
} else {
break;
}
}
taskLogReader.close();
// trim the string and return it
String str = result.toString();
str = str.trim();
return str;
}
/**
* test without TASK_LOG_DIR
*
* @throws IOException
*/
@Test (timeout=50000)
public void testTaskLogWithoutTaskLogDir() throws IOException {
// TaskLog tasklog= new TaskLog();
System.clearProperty(MRJobConfig.TASK_LOG_DIR);
// test TaskLog
assertEquals(TaskLog.getMRv2LogDir(), null);
TaskAttemptID taid = mock(TaskAttemptID.class);
JobID jid = new JobID("job", 1);
when(taid.getJobID()).thenReturn(jid);
when(taid.toString()).thenReturn("JobId");
File f = TaskLog.getTaskLogFile(taid, true, LogName.STDOUT);
assertTrue(f.getAbsolutePath().endsWith("stdout"));
}
}

View File

@ -0,0 +1,71 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.StringWriter;
import java.io.Writer;
import org.apache.log4j.Category;
import org.apache.log4j.Layout;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.Priority;
import org.apache.log4j.spi.LoggingEvent;
import org.junit.Test;
import static org.junit.Assert.*;
public class TestTaskLogAppender {
/**
* test TaskLogAppender
*/
@SuppressWarnings("deprecation")
@Test (timeout=5000)
public void testTaskLogAppender(){
TaskLogAppender appender= new TaskLogAppender();
System.setProperty(TaskLogAppender.TASKID_PROPERTY,"attempt_01_02_m03_04_001");
System.setProperty(TaskLogAppender.LOGSIZE_PROPERTY, "1003");
appender.activateOptions();
assertEquals(appender.getTaskId(), "attempt_01_02_m03_04_001");
assertEquals(appender.getTotalLogFileSize(),1000);
assertEquals(appender.getIsCleanup(),false);
// test writer
Writer writer= new StringWriter();
appender.setWriter(writer);
Layout layout = new PatternLayout("%-5p [%t]: %m%n");
appender.setLayout(layout);
Category logger= Logger.getLogger(getClass().getName());
LoggingEvent event = new LoggingEvent("fqnOfCategoryClass", logger, Priority.INFO, "message", new Throwable());
appender.append(event);
appender.flush() ;
appender.close();
assertTrue(writer.toString().length()>0);
// the cleanup flag set before activateOptions() should not change
appender= new TaskLogAppender();
appender.setIsCleanup(true);
appender.activateOptions();
assertEquals(appender.getIsCleanup(),true);
}
}

View File

@ -1,236 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Random;
import junit.framework.TestCase;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin;
import org.junit.Test;
/**
* A JUnit test for {@link LinuxResourceCalculatorPlugin}.
* Creates fake /proc/ information and verifies the parsing and calculation.
*/
public class TestLinuxResourceCalculatorPlugin extends TestCase {
/**
* LinuxResourceCalculatorPlugin with a fake timer
*/
static class FakeLinuxResourceCalculatorPlugin extends
LinuxResourceCalculatorPlugin {
long currentTime = 0;
public FakeLinuxResourceCalculatorPlugin(String procfsMemFile,
String procfsCpuFile,
String procfsStatFile,
long jiffyLengthInMillis) {
super(procfsMemFile, procfsCpuFile, procfsStatFile, jiffyLengthInMillis);
}
@Override
long getCurrentTime() {
return currentTime;
}
public void advanceTime(long adv) {
currentTime += adv * jiffyLengthInMillis;
}
}
private static final FakeLinuxResourceCalculatorPlugin plugin;
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString().replace(' ', '+');
private static final String FAKE_MEMFILE;
private static final String FAKE_CPUFILE;
private static final String FAKE_STATFILE;
private static final long FAKE_JIFFY_LENGTH = 10L;
static {
int randomNum = (new Random()).nextInt(1000000000);
FAKE_MEMFILE = TEST_ROOT_DIR + File.separator + "MEMINFO_" + randomNum;
FAKE_CPUFILE = TEST_ROOT_DIR + File.separator + "CPUINFO_" + randomNum;
FAKE_STATFILE = TEST_ROOT_DIR + File.separator + "STATINFO_" + randomNum;
plugin = new FakeLinuxResourceCalculatorPlugin(FAKE_MEMFILE, FAKE_CPUFILE,
FAKE_STATFILE,
FAKE_JIFFY_LENGTH);
}
static final String MEMINFO_FORMAT =
"MemTotal: %d kB\n" +
"MemFree: %d kB\n" +
"Buffers: 138244 kB\n" +
"Cached: 947780 kB\n" +
"SwapCached: 142880 kB\n" +
"Active: 3229888 kB\n" +
"Inactive: %d kB\n" +
"SwapTotal: %d kB\n" +
"SwapFree: %d kB\n" +
"Dirty: 122012 kB\n" +
"Writeback: 0 kB\n" +
"AnonPages: 2710792 kB\n" +
"Mapped: 24740 kB\n" +
"Slab: 132528 kB\n" +
"SReclaimable: 105096 kB\n" +
"SUnreclaim: 27432 kB\n" +
"PageTables: 11448 kB\n" +
"NFS_Unstable: 0 kB\n" +
"Bounce: 0 kB\n" +
"CommitLimit: 4125904 kB\n" +
"Committed_AS: 4143556 kB\n" +
"VmallocTotal: 34359738367 kB\n" +
"VmallocUsed: 1632 kB\n" +
"VmallocChunk: 34359736375 kB\n" +
"HugePages_Total: 0\n" +
"HugePages_Free: 0\n" +
"HugePages_Rsvd: 0\n" +
"Hugepagesize: 2048 kB";
static final String CPUINFO_FORMAT =
"processor : %s\n" +
"vendor_id : AuthenticAMD\n" +
"cpu family : 15\n" +
"model : 33\n" +
"model name : Dual Core AMD Opteron(tm) Processor 280\n" +
"stepping : 2\n" +
"cpu MHz : %f\n" +
"cache size : 1024 KB\n" +
"physical id : 0\n" +
"siblings : 2\n" +
"core id : 0\n" +
"cpu cores : 2\n" +
"fpu : yes\n" +
"fpu_exception : yes\n" +
"cpuid level : 1\n" +
"wp : yes\n" +
"flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov " +
"pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt lm " +
"3dnowext 3dnow pni lahf_lm cmp_legacy\n" +
"bogomips : 4792.41\n" +
"TLB size : 1024 4K pages\n" +
"clflush size : 64\n" +
"cache_alignment : 64\n" +
"address sizes : 40 bits physical, 48 bits virtual\n" +
"power management: ts fid vid ttp";
static final String STAT_FILE_FORMAT =
"cpu %d %d %d 1646495089 831319 48713 164346 0\n" +
"cpu0 15096055 30805 3823005 411456015 206027 13 14269 0\n" +
"cpu1 14760561 89890 6432036 408707910 456857 48074 130857 0\n" +
"cpu2 12761169 20842 3758639 413976772 98028 411 10288 0\n" +
"cpu3 12355207 47322 5789691 412354390 70406 213 8931 0\n" +
"intr 114648668 20010764 2 0 945665 2 0 0 0 0 0 0 0 4 0 0 0 0 0 0\n" +
"ctxt 242017731764\n" +
"btime 1257808753\n" +
"processes 26414943\n" +
"procs_running 1\n" +
"procs_blocked 0\n";
/**
* Test parsing /proc/stat and /proc/cpuinfo
* @throws IOException
*/
@Test
public void testParsingProcStatAndCpuFile() throws IOException {
// Write fake /proc/cpuinfo file.
long numProcessors = 8;
long cpuFrequencyKHz = 2392781;
String fileContent = "";
for (int i = 0; i < numProcessors; i++) {
fileContent += String.format(CPUINFO_FORMAT, i, cpuFrequencyKHz / 1000D) +
"\n";
}
File tempFile = new File(FAKE_CPUFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_CPUFILE);
fWriter.write(fileContent);
fWriter.close();
assertEquals(plugin.getNumProcessors(), numProcessors);
assertEquals(plugin.getCpuFrequency(), cpuFrequencyKHz);
// Write fake /proc/stat file.
long uTime = 54972994;
long nTime = 188860;
long sTime = 19803373;
tempFile = new File(FAKE_STATFILE);
tempFile.deleteOnExit();
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), (float)(LinuxResourceCalculatorPlugin.UNAVAILABLE));
// Advance the time and sample again to test the CPU usage calculation
uTime += 100L;
plugin.advanceTime(200L);
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), 6.25F);
// Advance the time and sample again. This time, we call getCpuUsage() only.
uTime += 600L;
plugin.advanceTime(300L);
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCpuUsage(), 25F);
// Advance very short period of time (one jiffy length).
// In this case, CPU usage should not be updated.
uTime += 1L;
plugin.advanceTime(1L);
updateStatFile(uTime, nTime, sTime);
assertEquals(plugin.getCumulativeCpuTime(),
FAKE_JIFFY_LENGTH * (uTime + nTime + sTime));
assertEquals(plugin.getCpuUsage(), 25F); // CPU usage is not updated.
}
/**
* Write information to fake /proc/stat file
*/
private void updateStatFile(long uTime, long nTime, long sTime)
throws IOException {
FileWriter fWriter = new FileWriter(FAKE_STATFILE);
fWriter.write(String.format(STAT_FILE_FORMAT, uTime, nTime, sTime));
fWriter.close();
}
/**
* Test parsing /proc/meminfo
* @throws IOException
*/
@Test
public void testParsingProcMemFile() throws IOException {
long memTotal = 4058864L;
long memFree = 99632L;
long inactive = 567732L;
long swapTotal = 2096472L;
long swapFree = 1818480L;
File tempFile = new File(FAKE_MEMFILE);
tempFile.deleteOnExit();
FileWriter fWriter = new FileWriter(FAKE_MEMFILE);
fWriter.write(String.format(MEMINFO_FORMAT,
memTotal, memFree, inactive, swapTotal, swapFree));
fWriter.close();
assertEquals(plugin.getAvailablePhysicalMemorySize(),
1024L * (memFree + inactive));
assertEquals(plugin.getAvailableVirtualMemorySize(),
1024L * (memFree + inactive + swapFree));
assertEquals(plugin.getPhysicalMemorySize(), 1024L * memTotal);
assertEquals(plugin.getVirtualMemorySize(), 1024L * (memTotal + swapTotal));
}
}
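As a reading aid for the CPU-usage assertions above, the expected percentages follow from a simple ratio of CPU time to elapsed wall time across all processors; this worked restatement is inferred from the test's fake jiffy length and processor count, not taken from the plugin source.

// Worked check of the 6.25F assertion, using the test's own numbers.
long jiffyMs = 10L;                  // FAKE_JIFFY_LENGTH
int numProcessors = 8;               // processors written to the fake cpuinfo file
long cpuDeltaMs  = 100L * jiffyMs;   // uTime advanced by 100 jiffies
long wallDeltaMs = 200L * jiffyMs;   // plugin.advanceTime(200L)
float usage = 100f * cpuDeltaMs / (wallDeltaMs * numProcessors);  // 6.25
// The later 25F assertion follows the same way: 100 * 6000 / (3000 * 8) = 25.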

View File

@ -0,0 +1,69 @@
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- This is the template for queue configuration. The format supports nesting of
queues within queues - a feature called hierarchical queues. All queues are
defined within the 'queues' tag which is the top level element for this
XML document.
The 'aclsEnabled' attribute should be set to true, if ACLs should be checked
on queue operations such as submitting jobs, killing jobs etc. -->
<queues aclsEnabled="false">
<!-- Configuration for a queue is specified by defining a 'queue' element. -->
<queue>
<!-- Name of a queue. Queue name cannot contain a ':' -->
<name>default</name>
<!-- properties for a queue, typically used by schedulers,
can be defined here -->
<properties>
</properties>
<!-- State of the queue. If running, the queue will accept new jobs.
If stopped, the queue will not accept new jobs. -->
<state>running</state>
<!-- Specifies the ACLs to check for submitting jobs to this queue.
If set to '*', it allows all users to submit jobs to the queue.
For specifying a list of users and groups the format to use is
user1,user2 group1,group2 -->
<acl-submit-job>*</acl-submit-job>
<!-- Specifies the ACLs to check for modifying jobs in this queue.
Modifications include killing jobs, tasks of jobs or changing
priorities.
If set to '*', it allows all users to modify jobs in the queue.
For specifying a list of users and groups the format to use is
user1,user2 group1,group2 -->
<acl-administer-jobs>*</acl-administer-jobs>
</queue>
<!-- Here is a sample of a hierarchical queue configuration
where q2 is a child of q1. In this example, q2 is a leaf level
queue as it has no queues configured within it. Currently, ACLs
and state are only supported for the leaf level queues.
Note also the usage of properties for the queue q2. -->
<queue>
<name>q1</name>
<queue>
<name>q2</name>
<properties>
<property key="capacity" value="20"/>
<property key="user-limit" value="30"/>
</properties>
</queue>
</queue>
</queues>
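For illustration only (not part of the committed template): using the
"user1,user2 group1,group2" format documented above, an ACL that allows only
the hypothetical users alice and bob plus the hypothetical group analysts to
submit jobs might look like:
  <acl-submit-job>alice,bob analysts</acl-submit-job>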

View File

@ -481,6 +481,9 @@ public class YARNRunner implements ClientProtocol {
appContext.setCancelTokensWhenComplete( appContext.setCancelTokensWhenComplete(
conf.getBoolean(MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN, true)); conf.getBoolean(MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN, true));
appContext.setAMContainerSpec(amContainer); // AM Container appContext.setAMContainerSpec(amContainer); // AM Container
appContext.setMaxAppAttempts(
conf.getInt(MRJobConfig.MR_AM_MAX_ATTEMPTS,
MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS));
return appContext; return appContext;
} }

View File

@ -26,12 +26,12 @@ import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.GzipCodec; import org.apache.hadoop.io.compress.GzipCodec;
import org.junit.Test; import org.junit.Test;
import static org.junit.Assert.*;
public class TestIFile { public class TestIFile {
@Test @Test
/** /**
* Create an IFile.Writer using GzipCodec since this codec does not * Create an IFile.Writer using GzipCodec since this code does not
* have a compressor when run via the tests (ie no native libraries). * have a compressor when run via the tests (ie no native libraries).
*/ */
public void testIFileWriterWithCodec() throws Exception { public void testIFileWriterWithCodec() throws Exception {
@ -63,5 +63,11 @@ public class TestIFile {
IFile.Reader<Text, Text> reader = IFile.Reader<Text, Text> reader =
new IFile.Reader<Text, Text>(conf, rfs, path, codec, null); new IFile.Reader<Text, Text>(conf, rfs, path, codec, null);
reader.close(); reader.close();
// test checksum
byte[] ab = new byte[100];
int bytesRead = reader.checksumIn.readWithChecksum(ab, 0, ab.length);
assertEquals(bytesRead, reader.checksumIn.getChecksum().length);
} }
} }

View File

@ -21,13 +21,20 @@ import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream; import java.io.ByteArrayOutputStream;
import java.io.DataInputStream; import java.io.DataInputStream;
import java.io.DataOutputStream; import java.io.DataOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.Arrays; import java.util.Arrays;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
/**
*
* test MultiFileSplit class
*/
public class TestMultiFileSplit extends TestCase{ public class TestMultiFileSplit extends TestCase{
public void testReadWrite() throws Exception { public void testReadWrite() throws Exception {
@ -58,4 +65,26 @@ public class TestMultiFileSplit extends TestCase{
assertTrue(Arrays.equals(split.getLengths(), readSplit.getLengths())); assertTrue(Arrays.equals(split.getLengths(), readSplit.getLengths()));
System.out.println(split.toString()); System.out.println(split.toString());
} }
/**
* test method getLocations
* @throws IOException
*/
public void testgetLocations() throws IOException {
JobConf job = new JobConf();
File tmpFile = File.createTempFile("test", "txt");
tmpFile.createNewFile();
OutputStream out = new FileOutputStream(tmpFile);
out.write("tempfile".getBytes());
out.flush();
out.close();
Path[] path = { new Path(tmpFile.getAbsolutePath()) };
long[] lengths = { 100 };
MultiFileSplit split = new MultiFileSplit(job, path, lengths);
String[] locations = split.getLocations();
assertTrue(locations.length == 1);
assertEquals(locations[0], "localhost");
}
} }

View File

@ -18,24 +18,37 @@
package org.apache.hadoop.mapred; package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.*;
import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when; import static org.mockito.Mockito.when;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File; import java.io.File;
import java.io.IOException; import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo;
import org.apache.hadoop.mapred.JobClient.NetworkedJob;
import org.apache.hadoop.mapred.JobClient.TaskStatusFilter;
import org.apache.hadoop.mapred.lib.IdentityMapper; import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer; import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.yarn.YarnException;
import org.junit.Test; import org.junit.Test;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
public class TestNetworkedJob { public class TestNetworkedJob {
private static String TEST_ROOT_DIR = new File(System.getProperty( private static String TEST_ROOT_DIR = new File(System.getProperty(
@ -44,8 +57,7 @@ public class TestNetworkedJob {
private static Path inFile = new Path(testDir, "in"); private static Path inFile = new Path(testDir, "in");
private static Path outDir = new Path(testDir, "out"); private static Path outDir = new Path(testDir, "out");
@SuppressWarnings("deprecation") @Test (timeout=5000)
@Test
public void testGetNullCounters() throws Exception { public void testGetNullCounters() throws Exception {
//mock creation //mock creation
Job mockJob = mock(Job.class); Job mockJob = mock(Job.class);
@ -57,7 +69,7 @@ public class TestNetworkedJob {
verify(mockJob).getCounters(); verify(mockJob).getCounters();
} }
@Test @Test (timeout=500000)
public void testGetJobStatus() throws IOException, InterruptedException, public void testGetJobStatus() throws IOException, InterruptedException,
ClassNotFoundException { ClassNotFoundException {
MiniMRClientCluster mr = null; MiniMRClientCluster mr = null;
@ -105,4 +117,278 @@ public class TestNetworkedJob {
} }
} }
} }
/**
* test NetworkedJob and related JobClient methods
* @throws Exception
*/
@SuppressWarnings( "deprecation" )
@Test (timeout=500000)
public void testNetworkedJob() throws Exception {
// mock creation
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
try {
Configuration conf = new Configuration();
mr = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
RunningJob rj = client.submitJob(job);
JobID jobId = rj.getID();
NetworkedJob runningJob = (NetworkedJob) client.getJob(jobId);
runningJob.setJobPriority(JobPriority.HIGH.name());
// test getters
assertTrue(runningJob.getConfiguration().toString()
.endsWith("0001/job.xml"));
assertEquals(runningJob.getID(), jobId);
assertEquals(runningJob.getJobID(), jobId.toString());
assertEquals(runningJob.getJobName(), "N/A");
assertTrue(runningJob.getJobFile().endsWith(
".staging/" + runningJob.getJobID() + "/job.xml"));
assertTrue(runningJob.getTrackingURL().length() > 0);
assertTrue(runningJob.mapProgress() == 0.0f);
assertTrue(runningJob.reduceProgress() == 0.0f);
assertTrue(runningJob.cleanupProgress() == 0.0f);
assertTrue(runningJob.setupProgress() == 0.0f);
TaskCompletionEvent[] tce = runningJob.getTaskCompletionEvents(0);
assertEquals(tce.length, 0);
assertEquals(runningJob.getHistoryUrl(),"");
assertFalse(runningJob.isRetired());
assertEquals( runningJob.getFailureInfo(),"");
assertEquals(runningJob.getJobStatus().getJobName(), "N/A");
assertEquals(client.getMapTaskReports(jobId).length, 0);
try {
client.getSetupTaskReports(jobId);
} catch (YarnException e) {
assertEquals(e.getMessage(), "Unrecognized task type: JOB_SETUP");
}
try {
client.getCleanupTaskReports(jobId);
} catch (YarnException e) {
assertEquals(e.getMessage(), "Unrecognized task type: JOB_CLEANUP");
}
assertEquals(client.getReduceTaskReports(jobId).length, 0);
// test ClusterStatus
ClusterStatus status = client.getClusterStatus(true);
assertEquals(status.getActiveTrackerNames().size(), 2);
// these methods are not implemented and always return an empty array or null;
assertEquals(status.getBlacklistedTrackers(), 0);
assertEquals(status.getBlacklistedTrackerNames().size(), 0);
assertEquals(status.getBlackListedTrackersInfo().size(), 0);
assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
assertEquals(status.getMapTasks(), 1);
assertEquals(status.getMaxMapTasks(), 20);
assertEquals(status.getMaxReduceTasks(), 4);
assertEquals(status.getNumExcludedNodes(), 0);
assertEquals(status.getReduceTasks(), 1);
assertEquals(status.getTaskTrackers(), 2);
assertEquals(status.getTTExpiryInterval(), 0);
assertEquals(status.getJobTrackerStatus(), JobTrackerStatus.RUNNING);
// test read and write
ByteArrayOutputStream dataOut = new ByteArrayOutputStream();
status.write(new DataOutputStream(dataOut));
ClusterStatus status2 = new ClusterStatus();
status2.readFields(new DataInputStream(new ByteArrayInputStream(dataOut
.toByteArray())));
assertEquals(status.getActiveTrackerNames(),
status2.getActiveTrackerNames());
assertEquals(status.getBlackListedTrackersInfo(),
status2.getBlackListedTrackersInfo());
assertEquals(status.getMapTasks(), status2.getMapTasks());
try {
} catch (RuntimeException e) {
assertTrue(e.getMessage().endsWith("not found on CLASSPATH"));
}
// test taskStatusfilter
JobClient.setTaskOutputFilter(job, TaskStatusFilter.ALL);
assertEquals(JobClient.getTaskOutputFilter(job), TaskStatusFilter.ALL);
// runningJob.setJobPriority(JobPriority.HIGH.name());
// test default map
assertEquals(client.getDefaultMaps(), 20);
assertEquals(client.getDefaultReduces(), 4);
assertEquals(client.getSystemDir().getName(), "jobSubmitDir");
// test queue information
JobQueueInfo[] rootQueueInfo = client.getRootQueues();
assertEquals(rootQueueInfo.length, 1);
assertEquals(rootQueueInfo[0].getQueueName(), "default");
JobQueueInfo[] qinfo = client.getQueues();
assertEquals(qinfo.length, 1);
assertEquals(qinfo[0].getQueueName(), "default");
assertEquals(client.getChildQueues("default").length, 0);
assertEquals(client.getJobsFromQueue("default").length, 1);
assertTrue(client.getJobsFromQueue("default")[0].getJobFile().endsWith(
"/job.xml"));
JobQueueInfo qi = client.getQueueInfo("default");
assertEquals(qi.getQueueName(), "default");
assertEquals(qi.getQueueState(), "running");
QueueAclsInfo[] aai = client.getQueueAclsForCurrentUser();
assertEquals(aai.length, 2);
assertEquals(aai[0].getQueueName(), "root");
assertEquals(aai[1].getQueueName(), "default");
// test token
Token<DelegationTokenIdentifier> token = client
.getDelegationToken(new Text(UserGroupInformation.getCurrentUser()
.getShortUserName()));
assertEquals(token.getKind().toString(), "RM_DELEGATION_TOKEN");
// test JobClient
// The following asserts read JobStatus twice and ensure the returned
// JobStatus objects correspond to the same Job.
assertEquals("Expected matching JobIDs", jobId, client.getJob(jobId)
.getJobStatus().getJobID());
assertEquals("Expected matching startTimes", rj.getJobStatus()
.getStartTime(), client.getJob(jobId).getJobStatus().getStartTime());
} finally {
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
/**
* test BlackListInfo class
*
* @throws IOException
*/
@Test (timeout=5000)
public void testBlackListInfo() throws IOException {
BlackListInfo info = new BlackListInfo();
info.setBlackListReport("blackListInfo");
info.setReasonForBlackListing("reasonForBlackListing");
info.setTrackerName("trackerName");
ByteArrayOutputStream byteOut = new ByteArrayOutputStream();
DataOutput out = new DataOutputStream(byteOut);
info.write(out);
BlackListInfo info2 = new BlackListInfo();
info2.readFields(new DataInputStream(new ByteArrayInputStream(byteOut
.toByteArray())));
assertEquals(info, info2);
assertEquals(info.toString(), info2.toString());
assertEquals(info.getTrackerName(), "trackerName");
assertEquals(info.getReasonForBlackListing(), "reasonForBlackListing");
assertEquals(info.getBlackListReport(), "blackListInfo");
}
/**
* test running JobQueueClient from the command line
* @throws Exception
*/
@Test (timeout=500000)
public void testJobQueueClient() throws Exception {
MiniMRClientCluster mr = null;
FileSystem fileSys = null;
PrintStream oldOut = System.out;
try {
Configuration conf = new Configuration();
mr = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
JobConf job = new JobConf(mr.getConfig());
fileSys = FileSystem.get(job);
fileSys.delete(testDir, true);
FSDataOutputStream out = fileSys.create(inFile, true);
out.writeBytes("This is a test file");
out.close();
FileInputFormat.setInputPaths(job, inFile);
FileOutputFormat.setOutputPath(job, outDir);
job.setInputFormat(TextInputFormat.class);
job.setOutputFormat(TextOutputFormat.class);
job.setMapperClass(IdentityMapper.class);
job.setReducerClass(IdentityReducer.class);
job.setNumReduceTasks(0);
JobClient client = new JobClient(mr.getConfig());
client.submitJob(job);
JobQueueClient jobClient = new JobQueueClient(job);
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg = { "-list" };
jobClient.run(arg);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg1 = { "-showacls" };
jobClient.run(arg1);
assertTrue(bytes.toString().contains("Queue acls for user :"));
assertTrue(bytes.toString().contains(
"root ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
assertTrue(bytes.toString().contains(
"default ADMINISTER_QUEUE,SUBMIT_APPLICATIONS"));
// test for info and default queue
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg2 = { "-info", "default" };
jobClient.run(arg2);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
// test for info , default queue and jobs
bytes = new ByteArrayOutputStream();
System.setOut(new PrintStream(bytes));
String[] arg3 = { "-info", "default", "-showJobs" };
jobClient.run(arg3);
assertTrue(bytes.toString().contains("Queue Name : default"));
assertTrue(bytes.toString().contains("Queue State : running"));
assertTrue(bytes.toString().contains("Scheduling Info"));
assertTrue(bytes.toString().contains("job_1"));
String[] arg4 = {};
jobClient.run(arg4);
} finally {
System.setOut(oldOut);
if (fileSys != null) {
fileSys.delete(testDir, true);
}
if (mr != null) {
mr.stop();
}
}
}
} }

View File

@ -0,0 +1,74 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapred;
import java.io.StringWriter;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import static org.junit.Assert.*;
import org.junit.Test;
public class TestQueueConfigurationParser {
/**
* test xml generation
* @throws ParserConfigurationException
* @throws Exception
*/
@Test (timeout=5000)
public void testQueueConfigurationParser()
throws ParserConfigurationException, Exception {
JobQueueInfo info = new JobQueueInfo("root", "rootInfo");
JobQueueInfo infoChild1 = new JobQueueInfo("child1", "child1Info");
JobQueueInfo infoChild2 = new JobQueueInfo("child2", "child1Info");
info.addChild(infoChild1);
info.addChild(infoChild2);
DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory
.newInstance();
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
Document document = builder.newDocument();
// test QueueConfigurationParser.getQueueElement
Element e = QueueConfigurationParser.getQueueElement(document, info);
// transform result to string for check
DOMSource domSource = new DOMSource(e);
StringWriter writer = new StringWriter();
StreamResult result = new StreamResult(writer);
TransformerFactory tf = TransformerFactory.newInstance();
Transformer transformer = tf.newTransformer();
transformer.transform(domSource, result);
String str= writer.toString();
assertTrue(str
.endsWith("<queue><name>root</name><properties/><state>running</state><queue><name>child1</name><properties/><state>running</state></queue><queue><name>child2</name><properties/><state>running</state></queue></queue>"));
}
}

View File

@ -17,6 +17,8 @@
*/ */
package org.apache.hadoop.mapred; package org.apache.hadoop.mapred;
import java.util.Map;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.hadoop.mapred.StatisticsCollector.TimeWindow; import org.apache.hadoop.mapred.StatisticsCollector.TimeWindow;
@ -24,6 +26,7 @@ import org.apache.hadoop.mapred.StatisticsCollector.Stat;
public class TestStatisticsCollector extends TestCase{ public class TestStatisticsCollector extends TestCase{
@SuppressWarnings("rawtypes")
public void testMovingWindow() throws Exception { public void testMovingWindow() throws Exception {
StatisticsCollector collector = new StatisticsCollector(1); StatisticsCollector collector = new StatisticsCollector(1);
TimeWindow window = new TimeWindow("test", 6, 2); TimeWindow window = new TimeWindow("test", 6, 2);
@ -78,6 +81,28 @@ public class TestStatisticsCollector extends TestCase{
collector.update(); collector.update();
assertEquals((10+10+10+12+13+14), stat.getValues().get(window).getValue()); assertEquals((10+10+10+12+13+14), stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue()); assertEquals(95, stat.getValues().get(sincStart).getValue());
// test Stat class
Map updaters = collector.getUpdaters();
assertEquals(updaters.size(), 2);
Map<String, Stat> statistics = collector.getStatistics();
assertNotNull(statistics.get("m1"));
Stat newStat = collector.createStat("m2");
assertEquals(newStat.name, "m2");
Stat st = collector.removeStat("m1");
assertEquals(st.name, "m1");
assertEquals((10+10+10+12+13+14), stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue());
// try to remove the same stat again; it should be gone now
st = collector.removeStat("m1");
assertNull(st);
collector.start();
// wait 2.5 seconds for the collector thread to update
Thread.sleep(2500);
assertEquals(69, stat.getValues().get(window).getValue());
assertEquals(95, stat.getValues().get(sincStart).getValue());
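// Note: 10+10+10+12+13+14 = 69, so the literal asserted here is the same
// window sum checked above; the since-start value presumably stays at 95
// because no further values were reported while sleeping.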
} }
} }

View File

@ -61,11 +61,12 @@ public class TestTextInputFormat {
throw new RuntimeException("init failure", e); throw new RuntimeException("init failure", e);
} }
} }
@SuppressWarnings("deprecation")
private static Path workDir = private static Path workDir =
new Path(new Path(System.getProperty("test.build.data", "/tmp")), new Path(new Path(System.getProperty("test.build.data", "/tmp")),
"TestTextInputFormat").makeQualified(localFs); "TestTextInputFormat").makeQualified(localFs);
@Test @Test (timeout=500000)
public void testFormat() throws Exception { public void testFormat() throws Exception {
JobConf job = new JobConf(defaultConf); JobConf job = new JobConf(defaultConf);
Path file = new Path(workDir, "test.txt"); Path file = new Path(workDir, "test.txt");
@ -145,7 +146,7 @@ public class TestTextInputFormat {
} }
} }
@Test @Test (timeout=900000)
public void testSplitableCodecs() throws IOException { public void testSplitableCodecs() throws IOException {
JobConf conf = new JobConf(defaultConf); JobConf conf = new JobConf(defaultConf);
int seed = new Random().nextInt(); int seed = new Random().nextInt();
@ -250,7 +251,7 @@ public class TestTextInputFormat {
bufsz); bufsz);
} }
@Test @Test (timeout=5000)
public void testUTF8() throws Exception { public void testUTF8() throws Exception {
LineReader in = makeStream("abcd\u20acbdcd\u20ac"); LineReader in = makeStream("abcd\u20acbdcd\u20ac");
Text line = new Text(); Text line = new Text();
@ -269,7 +270,7 @@ public class TestTextInputFormat {
* *
* @throws Exception * @throws Exception
*/ */
@Test @Test (timeout=5000)
public void testNewLines() throws Exception { public void testNewLines() throws Exception {
final String STR = "a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee"; final String STR = "a\nbb\n\nccc\rdddd\r\r\r\n\r\neeeee";
final int STRLENBYTES = STR.getBytes().length; final int STRLENBYTES = STR.getBytes().length;
@ -309,7 +310,7 @@ public class TestTextInputFormat {
* *
* @throws Exception * @throws Exception
*/ */
@Test @Test (timeout=5000)
public void testMaxLineLength() throws Exception { public void testMaxLineLength() throws Exception {
final String STR = "a\nbb\n\nccc\rdddd\r\neeeee"; final String STR = "a\nbb\n\nccc\rdddd\r\neeeee";
final int STRLENBYTES = STR.getBytes().length; final int STRLENBYTES = STR.getBytes().length;
@ -334,7 +335,7 @@ public class TestTextInputFormat {
} }
} }
@Test @Test (timeout=5000)
public void testMRMaxLine() throws Exception { public void testMRMaxLine() throws Exception {
final int MAXPOS = 1024 * 1024; final int MAXPOS = 1024 * 1024;
final int MAXLINE = 10 * 1024; final int MAXLINE = 10 * 1024;
@ -354,6 +355,9 @@ public class TestTextInputFormat {
position += b.length; position += b.length;
return b.length; return b.length;
} }
public void reset() {
position=0;
}
}; };
final LongWritable key = new LongWritable(); final LongWritable key = new LongWritable();
final Text val = new Text(); final Text val = new Text();
@ -362,8 +366,14 @@ public class TestTextInputFormat {
conf.setInt(org.apache.hadoop.mapreduce.lib.input. conf.setInt(org.apache.hadoop.mapreduce.lib.input.
LineRecordReader.MAX_LINE_LENGTH, MAXLINE); LineRecordReader.MAX_LINE_LENGTH, MAXLINE);
conf.setInt("io.file.buffer.size", BUF); // used by LRR conf.setInt("io.file.buffer.size", BUF); // used by LRR
final LineRecordReader lrr = new LineRecordReader(infNull, 0, MAXPOS, conf); // test another constructor
LineRecordReader lrr = new LineRecordReader(infNull, 0, MAXPOS, conf);
assertFalse("Read a line from null", lrr.next(key, val)); assertFalse("Read a line from null", lrr.next(key, val));
infNull.reset();
lrr = new LineRecordReader(infNull, 0L, MAXLINE, MAXPOS);
assertFalse("Read a line from null", lrr.next(key, val));
} }
private static void writeFile(FileSystem fs, Path name, private static void writeFile(FileSystem fs, Path name,
@ -400,7 +410,7 @@ public class TestTextInputFormat {
/** /**
* Test using the gzip codec for reading * Test using the gzip codec for reading
*/ */
@Test @Test (timeout=5000)
public void testGzip() throws IOException { public void testGzip() throws IOException {
JobConf job = new JobConf(defaultConf); JobConf job = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec(); CompressionCodec gzip = new GzipCodec();
@ -434,7 +444,7 @@ public class TestTextInputFormat {
/** /**
* Test using the gzip codec and an empty input file * Test using the gzip codec and an empty input file
*/ */
@Test @Test (timeout=5000)
public void testGzipEmpty() throws IOException { public void testGzipEmpty() throws IOException {
JobConf job = new JobConf(defaultConf); JobConf job = new JobConf(defaultConf);
CompressionCodec gzip = new GzipCodec(); CompressionCodec gzip = new GzipCodec();

View File

@ -44,7 +44,6 @@ public class TestTextOutputFormat extends TestCase {
"data"), "data"),
FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt); FileOutputCommitter.TEMP_DIR_NAME), "_" + attempt);
@SuppressWarnings("unchecked")
public void testFormat() throws Exception { public void testFormat() throws Exception {
JobConf job = new JobConf(); JobConf job = new JobConf();
job.set(JobContext.TASK_ATTEMPT_ID, attempt); job.set(JobContext.TASK_ATTEMPT_ID, attempt);
@ -59,8 +58,8 @@ public class TestTextOutputFormat extends TestCase {
// A reporter that does nothing // A reporter that does nothing
Reporter reporter = Reporter.NULL; Reporter reporter = Reporter.NULL;
TextOutputFormat theOutputFormat = new TextOutputFormat(); TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter theRecordWriter = RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter); theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1"); Text key1 = new Text("key1");
@ -95,7 +94,6 @@ public class TestTextOutputFormat extends TestCase {
} }
@SuppressWarnings("unchecked")
public void testFormatWithCustomSeparator() throws Exception { public void testFormatWithCustomSeparator() throws Exception {
JobConf job = new JobConf(); JobConf job = new JobConf();
String separator = "\u0001"; String separator = "\u0001";
@ -112,8 +110,8 @@ public class TestTextOutputFormat extends TestCase {
// A reporter that does nothing // A reporter that does nothing
Reporter reporter = Reporter.NULL; Reporter reporter = Reporter.NULL;
TextOutputFormat theOutputFormat = new TextOutputFormat(); TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter theRecordWriter = RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter); theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1"); Text key1 = new Text("key1");
@ -147,7 +145,61 @@ public class TestTextOutputFormat extends TestCase {
assertEquals(output, expectedOutput.toString()); assertEquals(output, expectedOutput.toString());
} }
/**
* test compressed file
* @throws IOException
*/
public void testCompress() throws IOException{
JobConf job = new JobConf();
String separator = "\u0001";
job.set("mapreduce.output.textoutputformat.separator", separator);
job.set(JobContext.TASK_ATTEMPT_ID, attempt);
job.set(org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.COMPRESS,"true");
FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
FileOutputFormat.setWorkOutputPath(job, workDir);
FileSystem fs = workDir.getFileSystem(job);
if (!fs.mkdirs(workDir)) {
fail("Failed to create output directory");
}
String file = "test.txt";
// A reporter that does nothing
Reporter reporter = Reporter.NULL;
TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
RecordWriter<Object,Object> theRecordWriter =
theOutputFormat.getRecordWriter(localFs, job, file, reporter);
Text key1 = new Text("key1");
Text key2 = new Text("key2");
Text val1 = new Text("val1");
Text val2 = new Text("val2");
NullWritable nullWritable = NullWritable.get();
try {
theRecordWriter.write(key1, val1);
theRecordWriter.write(null, nullWritable);
theRecordWriter.write(null, val1);
theRecordWriter.write(nullWritable, val2);
theRecordWriter.write(key2, nullWritable);
theRecordWriter.write(key1, null);
theRecordWriter.write(null, null);
theRecordWriter.write(key2, val2);
} finally {
theRecordWriter.close(reporter);
}
File expectedFile = new File(new Path(workDir, file).toString());
StringBuffer expectedOutput = new StringBuffer();
expectedOutput.append(key1).append(separator).append(val1).append("\n");
expectedOutput.append(val1).append("\n");
expectedOutput.append(val2).append("\n");
expectedOutput.append(key2).append("\n");
expectedOutput.append(key1).append("\n");
expectedOutput.append(key2).append(separator).append(val2).append("\n");
String output = UtilsForTests.slurp(expectedFile);
assertEquals(output, expectedOutput.toString());
}
public static void main(String[] args) throws Exception { public static void main(String[] args) throws Exception {
new TestTextOutputFormat().testFormat(); new TestTextOutputFormat().testFormat();
} }

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Plugin to calculate virtual and physical memories on Linux systems.
* @deprecated
* Use {@link org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin}
* instead
*/
@Deprecated
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class LinuxMemoryCalculatorPlugin extends MemoryCalculatorPlugin {
private LinuxResourceCalculatorPlugin resourceCalculatorPlugin;
// Use everything from LinuxResourceCalculatorPlugin
public LinuxMemoryCalculatorPlugin() {
resourceCalculatorPlugin = new LinuxResourceCalculatorPlugin();
}
/** {@inheritDoc} */
@Override
public long getPhysicalMemorySize() {
return resourceCalculatorPlugin.getPhysicalMemorySize();
}
/** {@inheritDoc} */
@Override
public long getVirtualMemorySize() {
return resourceCalculatorPlugin.getVirtualMemorySize();
}
}

View File

@ -1,82 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.ReflectionUtils;
/**
* Plugin to calculate virtual and physical memories on the system.
* @deprecated Use
* {@link org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin}
* instead
*/
@Deprecated
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class MemoryCalculatorPlugin extends Configured {
/**
* Obtain the total size of the virtual memory present in the system.
*
* @return virtual memory size in bytes.
*/
public abstract long getVirtualMemorySize();
/**
* Obtain the total size of the physical memory present in the system.
*
* @return physical memory size bytes.
*/
public abstract long getPhysicalMemorySize();
/**
* Get the MemoryCalculatorPlugin from the class name and configure it. If
* class name is null, this method will try and return a memory calculator
* plugin available for this system.
*
* @param clazz class-name
* @param conf configure the plugin with this.
* @return MemoryCalculatorPlugin
*/
public static MemoryCalculatorPlugin getMemoryCalculatorPlugin(
Class<? extends MemoryCalculatorPlugin> clazz, Configuration conf) {
if (clazz != null) {
return ReflectionUtils.newInstance(clazz, conf);
}
// No class given, try a os specific class
try {
String osName = System.getProperty("os.name");
if (osName.startsWith("Linux")) {
return new LinuxMemoryCalculatorPlugin();
}
} catch (SecurityException se) {
// Failed to get Operating System name.
return null;
}
// Not supported on this system.
return null;
}
}

View File

@ -1,677 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.util;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.Random;
import java.util.Vector;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Shell.ExitCodeException;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import junit.framework.TestCase;
/**
* A JUnit test to test ProcfsBasedProcessTree.
*/
public class TestProcfsBasedProcessTree extends TestCase {
private static final Log LOG = LogFactory
.getLog(TestProcfsBasedProcessTree.class);
private static String TEST_ROOT_DIR = new Path(System.getProperty(
"test.build.data", "/tmp")).toString().replace(' ', '+');
private ShellCommandExecutor shexec = null;
private String pidFile, lowestDescendant;
private String shellScript;
private static final int N = 6; // Controls the RogueTask
private class RogueTaskThread extends Thread {
public void run() {
try {
Vector<String> args = new Vector<String>();
if(ProcessTree.isSetsidAvailable) {
args.add("setsid");
}
args.add("bash");
args.add("-c");
args.add(" echo $$ > " + pidFile + "; sh " +
shellScript + " " + N + ";") ;
shexec = new ShellCommandExecutor(args.toArray(new String[0]));
shexec.execute();
} catch (ExitCodeException ee) {
LOG.info("Shell Command exit with a non-zero exit code. This is" +
" expected as we are killing the subprocesses of the" +
" task intentionally. " + ee);
} catch (IOException ioe) {
LOG.info("Error executing shell command " + ioe);
} finally {
LOG.info("Exit code: " + shexec.getExitCode());
}
}
}
private String getRogueTaskPID() {
File f = new File(pidFile);
while (!f.exists()) {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
break;
}
}
// read from pidFile
return getPidFromPidFile(pidFile);
}
public void testProcessTree() {
try {
if (!ProcfsBasedProcessTree.isAvailable()) {
System.out
.println("ProcfsBasedProcessTree is not available on this system. Not testing");
return;
}
} catch (Exception e) {
LOG.info(StringUtils.stringifyException(e));
return;
}
// create shell script
Random rm = new Random();
File tempFile = new File(TEST_ROOT_DIR, this.getName() + "_shellScript_" +
rm.nextInt() + ".sh");
tempFile.deleteOnExit();
shellScript = TEST_ROOT_DIR + File.separator + tempFile.getName();
// create pid file
tempFile = new File(TEST_ROOT_DIR, this.getName() + "_pidFile_" +
rm.nextInt() + ".pid");
tempFile.deleteOnExit();
pidFile = TEST_ROOT_DIR + File.separator + tempFile.getName();
lowestDescendant = TEST_ROOT_DIR + File.separator + "lowestDescendantPidFile";
// write to shell-script
try {
FileWriter fWriter = new FileWriter(shellScript);
fWriter.write(
"# rogue task\n" +
"sleep 1\n" +
"echo hello\n" +
"if [ $1 -ne 0 ]\n" +
"then\n" +
" sh " + shellScript + " $(($1-1))\n" +
"else\n" +
" echo $$ > " + lowestDescendant + "\n" +
" while true\n do\n" +
" sleep 5\n" +
" done\n" +
"fi");
fWriter.close();
} catch (IOException ioe) {
LOG.info("Error: " + ioe);
return;
}
Thread t = new RogueTaskThread();
t.start();
String pid = getRogueTaskPID();
LOG.info("Root process pid: " + pid);
ProcfsBasedProcessTree p = new ProcfsBasedProcessTree(pid,
ProcessTree.isSetsidAvailable,
ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
p.updateProcessTree(); // initialize
LOG.info("ProcessTree: " + p.toString());
File leaf = new File(lowestDescendant);
//wait till lowest descendant process of Rougue Task starts execution
while (!leaf.exists()) {
try {
Thread.sleep(500);
} catch (InterruptedException ie) {
break;
}
}
p.updateProcessTree(); // reconstruct
LOG.info("ProcessTree: " + p.toString());
// Get the process-tree dump
String processTreeDump = p.getProcessTreeDump();
// destroy the process and all its subprocesses
p.destroy(true/*in the background*/);
if(ProcessTree.isSetsidAvailable) {// whole processtree should be gone
assertEquals(false, p.isAnyProcessInTreeAlive());
}
else {// process should be gone
assertFalse("ProcessTree must have been gone", p.isAlive());
}
LOG.info("Process-tree dump follows: \n" + processTreeDump);
assertTrue("Process-tree dump doesn't start with a proper header",
processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
"USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
"RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
for (int i = N; i >= 0; i--) {
String cmdLineDump = "\\|- [0-9]+ [0-9]+ [0-9]+ [0-9]+ \\(sh\\)" +
" [0-9]+ [0-9]+ [0-9]+ [0-9]+ sh " + shellScript + " " + i;
Pattern pat = Pattern.compile(cmdLineDump);
Matcher mat = pat.matcher(processTreeDump);
assertTrue("Process-tree dump doesn't contain the cmdLineDump of " + i
+ "th process!", mat.find());
}
// Not able to join thread sometimes when forking with large N.
try {
t.join(2000);
LOG.info("RogueTaskThread successfully joined.");
} catch (InterruptedException ie) {
LOG.info("Interrupted while joining RogueTaskThread.");
}
// ProcessTree is gone now. Any further calls should be sane.
p.updateProcessTree();
assertFalse("ProcessTree must have been gone", p.isAlive());
assertTrue("Cumulative vmem for the gone-process is "
+ p.getCumulativeVmem() + " . It should be zero.", p
.getCumulativeVmem() == 0);
assertTrue(p.toString().equals("[ ]"));
}
/**
* Get PID from a pid-file.
*
* @param pidFileName
* Name of the pid-file.
* @return the PID string read from the pid-file. Returns null if the
* pidFileName points to a non-existing file or if read fails from the
* file.
*/
public static String getPidFromPidFile(String pidFileName) {
BufferedReader pidFile = null;
FileReader fReader = null;
String pid = null;
try {
fReader = new FileReader(pidFileName);
pidFile = new BufferedReader(fReader);
} catch (FileNotFoundException f) {
LOG.debug("PidFile doesn't exist : " + pidFileName);
return pid;
}
try {
pid = pidFile.readLine();
} catch (IOException i) {
LOG.error("Failed to read from " + pidFileName);
} finally {
try {
if (fReader != null) {
fReader.close();
}
try {
if (pidFile != null) {
pidFile.close();
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + pidFile);
}
} catch (IOException i) {
LOG.warn("Error closing the stream " + fReader);
}
}
return pid;
}
public static class ProcessStatInfo {
// sample stat in a single line : 3910 (gpm) S 1 3910 3910 0 -1 4194624
// 83 0 0 0 0 0 0 0 16 0 1 0 7852 2408448 88 4294967295 134512640
// 134590050 3220521392 3220520036 10975138 0 0 4096 134234626
// 4294967295 0 0 17 1 0 0
String pid;
String name;
String ppid;
String pgrpId;
String session;
String vmem = "0";
String rssmemPage = "0";
String utime = "0";
String stime = "0";
public ProcessStatInfo(String[] statEntries) {
pid = statEntries[0];
name = statEntries[1];
ppid = statEntries[2];
pgrpId = statEntries[3];
session = statEntries[4];
vmem = statEntries[5];
if (statEntries.length > 6) {
rssmemPage = statEntries[6];
}
if (statEntries.length > 7) {
utime = statEntries[7];
stime = statEntries[8];
}
}
// construct a line that mimics the procfs stat file.
// all unused numerical entries are set to 0.
public String getStatLine() {
return String.format("%s (%s) S %s %s %s 0 0 0" +
" 0 0 0 0 %s %s 0 0 0 0 0 0 0 %s %s 0 0" +
" 0 0 0 0 0 0 0 0" +
" 0 0 0 0 0",
pid, name, ppid, pgrpId, session,
utime, stime, vmem, rssmemPage);
}
}
/**
* A basic test that creates a few process directories and writes
* stat files. Verifies that the cpu time and memory is correctly
* computed.
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
public void testCpuAndMemoryForProcessTree() throws IOException {
// test processes
String[] pids = { "100", "200", "300", "400" };
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
// create stat objects.
// assuming processes 100, 200, 300 are in tree and 400 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
procInfos[0] = new ProcessStatInfo(new String[]
{"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
procInfos[1] = new ProcessStatInfo(new String[]
{"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
procInfos[2] = new ProcessStatInfo(new String[]
{"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
procInfos[3] = new ProcessStatInfo(new String[]
{"400", "proc4", "1", "400", "400", "400000", "400", "4000", "800"});
writeStatFiles(procfsRootDir, pids, procInfos);
// crank up the process tree class.
ProcfsBasedProcessTree processTree =
new ProcfsBasedProcessTree("100", true, 100L,
procfsRootDir.getAbsolutePath());
// build the process tree.
processTree.updateProcessTree();
// verify cumulative memory
assertEquals("Cumulative virtual memory does not match", 600000L,
processTree.getCumulativeVmem());
// verify rss memory
long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
600L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
assertEquals("Cumulative rss memory does not match",
cumuRssMem, processTree.getCumulativeRssmem());
// verify cumulative cpu time
long cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
7200L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
assertEquals("Cumulative cpu time does not match",
cumuCpuTime, processTree.getCumulativeCpuTime());
// test the cpu time again to see if it cumulates
procInfos[0] = new ProcessStatInfo(new String[]
{"100", "proc1", "1", "100", "100", "100000", "100", "2000", "300"});
procInfos[1] = new ProcessStatInfo(new String[]
{"200", "proc2", "100", "100", "100", "200000", "200", "3000", "500"});
writeStatFiles(procfsRootDir, pids, procInfos);
// build the process tree.
processTree.updateProcessTree();
// verify cumulative cpu time again
cumuCpuTime = ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS > 0 ?
9400L * ProcfsBasedProcessTree.JIFFY_LENGTH_IN_MILLIS : 0L;
assertEquals("Cumulative cpu time does not match",
cumuCpuTime, processTree.getCumulativeCpuTime());
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
* Tests that cumulative memory is computed only for
* processes older than a given age.
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
public void testMemForOlderProcesses() throws IOException {
// initial list of processes
String[] pids = { "100", "200", "300", "400" };
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
// create stat objects.
// assuming 100, 200 and 400 are in tree, 300 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[4];
procInfos[0] = new ProcessStatInfo(new String[]
{"100", "proc1", "1", "100", "100", "100000", "100"});
procInfos[1] = new ProcessStatInfo(new String[]
{"200", "proc2", "100", "100", "100", "200000", "200"});
procInfos[2] = new ProcessStatInfo(new String[]
{"300", "proc3", "1", "300", "300", "300000", "300"});
procInfos[3] = new ProcessStatInfo(new String[]
{"400", "proc4", "100", "100", "100", "400000", "400"});
writeStatFiles(procfsRootDir, pids, procInfos);
// crank up the process tree class.
ProcfsBasedProcessTree processTree =
new ProcfsBasedProcessTree("100", true, 100L,
procfsRootDir.getAbsolutePath());
// build the process tree.
processTree.updateProcessTree();
// verify cumulative memory
assertEquals("Cumulative memory does not match",
700000L, processTree.getCumulativeVmem());
// write one more process as child of 100.
String[] newPids = { "500" };
setupPidDirs(procfsRootDir, newPids);
ProcessStatInfo[] newProcInfos = new ProcessStatInfo[1];
newProcInfos[0] = new ProcessStatInfo(new String[]
{"500", "proc5", "100", "100", "100", "500000", "500"});
writeStatFiles(procfsRootDir, newPids, newProcInfos);
// check memory includes the new process.
processTree.updateProcessTree();
assertEquals("Cumulative vmem does not include new process",
1200000L, processTree.getCumulativeVmem());
long cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
assertEquals("Cumulative rssmem does not include new process",
cumuRssMem, processTree.getCumulativeRssmem());
// however processes older than 1 iteration will retain the older value
assertEquals("Cumulative vmem shouldn't have included new process",
700000L, processTree.getCumulativeVmem(1));
cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
assertEquals("Cumulative rssmem shouldn't have included new process",
cumuRssMem, processTree.getCumulativeRssmem(1));
// one more process
newPids = new String[]{ "600" };
setupPidDirs(procfsRootDir, newPids);
newProcInfos = new ProcessStatInfo[1];
newProcInfos[0] = new ProcessStatInfo(new String[]
{"600", "proc6", "100", "100", "100", "600000", "600"});
writeStatFiles(procfsRootDir, newPids, newProcInfos);
// refresh process tree
processTree.updateProcessTree();
// processes older than 2 iterations should be same as before.
assertEquals("Cumulative vmem shouldn't have included new processes",
700000L, processTree.getCumulativeVmem(2));
cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
700L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
assertEquals("Cumulative rssmem shouldn't have included new processes",
cumuRssMem, processTree.getCumulativeRssmem(2));
// processes older than 1 iteration should not include new process,
// but include process 500
assertEquals("Cumulative vmem shouldn't have included new processes",
1200000L, processTree.getCumulativeVmem(1));
cumuRssMem = ProcfsBasedProcessTree.PAGE_SIZE > 0 ?
1200L * ProcfsBasedProcessTree.PAGE_SIZE : 0L;
assertEquals("Cumulative rssmem shouldn't have included new processes",
cumuRssMem, processTree.getCumulativeRssmem(1));
// no processes older than 3 iterations, this should be 0
assertEquals("Getting non-zero vmem for processes older than 3 iterations",
0L, processTree.getCumulativeVmem(3));
assertEquals("Getting non-zero rssmem for processes older than 3 iterations",
0L, processTree.getCumulativeRssmem(3));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
* Verifies ProcfsBasedProcessTree.checkPidPgrpidForMatch() in case of
* 'constructProcessInfo() returning null' by not writing stat file for the
* mock process
* @throws IOException if there was a problem setting up the
* fake procfs directories or files.
*/
public void testDestroyProcessTree() throws IOException {
// test process
String pid = "100";
// create the fake procfs root directory.
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
// crank up the process tree class.
ProcfsBasedProcessTree processTree = new ProcfsBasedProcessTree(
pid, true, 100L, procfsRootDir.getAbsolutePath());
// Let us not create stat file for pid 100.
assertTrue(ProcfsBasedProcessTree.checkPidPgrpidForMatch(
pid, procfsRootDir.getAbsolutePath()));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
* Test the correctness of process-tree dump.
*
* @throws IOException
*/
public void testProcessTreeDump()
throws IOException {
String[] pids = { "100", "200", "300", "400", "500", "600" };
File procfsRootDir = new File(TEST_ROOT_DIR, "proc");
try {
setupProcfsRootDir(procfsRootDir);
setupPidDirs(procfsRootDir, pids);
int numProcesses = pids.length;
// Processes 200, 300, 400 and 500 are descendants of 100. 600 is not.
ProcessStatInfo[] procInfos = new ProcessStatInfo[numProcesses];
procInfos[0] = new ProcessStatInfo(new String[] {
"100", "proc1", "1", "100", "100", "100000", "100", "1000", "200"});
procInfos[1] = new ProcessStatInfo(new String[] {
"200", "proc2", "100", "100", "100", "200000", "200", "2000", "400"});
procInfos[2] = new ProcessStatInfo(new String[] {
"300", "proc3", "200", "100", "100", "300000", "300", "3000", "600"});
procInfos[3] = new ProcessStatInfo(new String[] {
"400", "proc4", "200", "100", "100", "400000", "400", "4000", "800"});
procInfos[4] = new ProcessStatInfo(new String[] {
"500", "proc5", "400", "100", "100", "400000", "400", "4000", "800"});
procInfos[5] = new ProcessStatInfo(new String[] {
"600", "proc6", "1", "1", "1", "400000", "400", "4000", "800"});
String[] cmdLines = new String[numProcesses];
cmdLines[0] = "proc1 arg1 arg2";
cmdLines[1] = "proc2 arg3 arg4";
cmdLines[2] = "proc3 arg5 arg6";
cmdLines[3] = "proc4 arg7 arg8";
cmdLines[4] = "proc5 arg9 arg10";
cmdLines[5] = "proc6 arg11 arg12";
writeStatFiles(procfsRootDir, pids, procInfos);
writeCmdLineFiles(procfsRootDir, pids, cmdLines);
ProcfsBasedProcessTree processTree =
new ProcfsBasedProcessTree("100", true, 100L, procfsRootDir
.getAbsolutePath());
// build the process tree.
processTree.updateProcessTree();
// Get the process-tree dump
String processTreeDump = processTree.getProcessTreeDump();
LOG.info("Process-tree dump follows: \n" + processTreeDump);
assertTrue("Process-tree dump doesn't start with a proper header",
processTreeDump.startsWith("\t|- PID PPID PGRPID SESSID CMD_NAME " +
"USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " +
"RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n"));
for (int i = 0; i < 5; i++) {
ProcessStatInfo p = procInfos[i];
assertTrue(
"Process-tree dump doesn't contain the cmdLineDump of process "
+ p.pid, processTreeDump.contains("\t|- " + p.pid + " "
+ p.ppid + " " + p.pgrpId + " " + p.session + " (" + p.name
+ ") " + p.utime + " " + p.stime + " " + p.vmem + " "
+ p.rssmemPage + " " + cmdLines[i]));
}
// 600 should not be in the dump
ProcessStatInfo p = procInfos[5];
assertFalse(
"Process-tree dump shouldn't contain the cmdLineDump of process "
+ p.pid, processTreeDump.contains("\t|- " + p.pid + " " + p.ppid
+ " " + p.pgrpId + " " + p.session + " (" + p.name + ") "
+ p.utime + " " + p.stime + " " + p.vmem + " " + cmdLines[5]));
} finally {
FileUtil.fullyDelete(procfsRootDir);
}
}
/**
* Create a directory to mimic the procfs file system's root.
* @param procfsRootDir root directory to create.
* @throws IOException if could not delete the procfs root directory
*/
public static void setupProcfsRootDir(File procfsRootDir) {
// cleanup any existing process root dir.
if (procfsRootDir.exists()) {
assertTrue(FileUtil.fullyDelete(procfsRootDir));
}
// create afresh
assertTrue(procfsRootDir.mkdirs());
}
/**
* Create PID directories under the specified procfs root directory
* @param procfsRootDir root directory of procfs file system
* @param pids the PID directories to create.
* @throws IOException If PID dirs could not be created
*/
public static void setupPidDirs(File procfsRootDir, String[] pids)
throws IOException {
for (String pid : pids) {
File pidDir = new File(procfsRootDir, pid);
pidDir.mkdir();
if (!pidDir.exists()) {
throw new IOException ("couldn't make process directory under " +
"fake procfs");
} else {
LOG.info("created pid dir");
}
}
}
/**
* Write stat files under the specified pid directories with data
* setup in the corresponding ProcessStatInfo objects
* @param procfsRootDir root directory of procfs file system
* @param pids the PID directories under which to create the stat file
* @param procs corresponding ProcessStatInfo objects whose data should be
* written to the stat files.
* @throws IOException if stat files could not be written
*/
public static void writeStatFiles(File procfsRootDir, String[] pids,
ProcessStatInfo[] procs) throws IOException {
for (int i=0; i<pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_STAT_FILE);
BufferedWriter bw = null;
try {
FileWriter fw = new FileWriter(statFile);
bw = new BufferedWriter(fw);
bw.write(procs[i].getStatLine());
LOG.info("wrote stat file for " + pids[i] +
" with contents: " + procs[i].getStatLine());
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
private static void writeCmdLineFiles(File procfsRootDir, String[] pids,
String[] cmdLines)
throws IOException {
for (int i = 0; i < pids.length; i++) {
File statFile =
new File(new File(procfsRootDir, pids[i]),
ProcfsBasedProcessTree.PROCFS_CMDLINE_FILE);
BufferedWriter bw = null;
try {
bw = new BufferedWriter(new FileWriter(statFile));
bw.write(cmdLines[i]);
LOG.info("wrote command-line file for " + pids[i] + " with contents: "
+ cmdLines[i]);
} finally {
// not handling exception - will throw an error and fail the test.
if (bw != null) {
bw.close();
}
}
}
}
}
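
Taken together, these helpers let a test fabricate a complete fake procfs. A condensed sketch of how they might be wired up inside a test method of this class follows; the PIDs, stat values and command lines are made up, and ProcessStatInfo is assumed to take the String[] of stat fields used elsewhere in this test class.

  // Illustrative only -- not part of the patch.
  public void sketchFakeProcfsUsage() throws IOException {
    File procfsRootDir =
        new File(System.getProperty("test.build.data", "/tmp"), "proc");
    String[] pids = { "100", "200" };
    String[] cmdLines = { "proc1 arg1 arg2", "proc2 arg3 arg4" };
    ProcessStatInfo[] procInfos = {
        // assumed field order: pid, name, ppid, pgrpId, session, vmem(bytes), rss(pages)
        new ProcessStatInfo(new String[] { "100", "proc1", "1", "100", "100", "100000", "100" }),
        new ProcessStatInfo(new String[] { "200", "proc2", "100", "100", "100", "200000", "200" }) };

    setupProcfsRootDir(procfsRootDir);                // clean, empty fake /proc
    setupPidDirs(procfsRootDir, pids);                // one directory per fake PID
    writeStatFiles(procfsRootDir, pids, procInfos);   // <pid>/stat for each PID
    writeCmdLineFiles(procfsRootDir, pids, cmdLines); // <pid>/cmdline for each PID

    // Point the process tree at the fake root instead of the real /proc.
    ProcfsBasedProcessTree processTree = new ProcfsBasedProcessTree(
        "100", true, 100L, procfsRootDir.getAbsolutePath());
    processTree.updateProcessTree();
    LOG.info("Process-tree dump:\n" + processTree.getProcessTreeDump());
  }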

View File

@ -37,11 +37,11 @@ import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.tools.rumen.JobStory; import org.apache.hadoop.tools.rumen.JobStory;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.tools.rumen.TaskInfo; import org.apache.hadoop.tools.rumen.TaskInfo;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;

View File

@ -22,8 +22,8 @@ import java.util.Random;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive; import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* <p>A {@link ResourceUsageEmulatorPlugin} that emulates the cumulative CPU * <p>A {@link ResourceUsageEmulatorPlugin} that emulates the cumulative CPU
@ -166,7 +166,7 @@ implements ResourceUsageEmulatorPlugin {
*/ */
public void calibrate(ResourceCalculatorPlugin monitor, public void calibrate(ResourceCalculatorPlugin monitor,
long totalCpuUsage) { long totalCpuUsage) {
long initTime = monitor.getProcResourceValues().getCumulativeCpuTime(); long initTime = monitor.getCumulativeCpuTime();
long defaultLoopSize = 0; long defaultLoopSize = 0;
long finalTime = initTime; long finalTime = initTime;
@ -175,7 +175,7 @@ implements ResourceUsageEmulatorPlugin {
while (finalTime - initTime < 100) { // 100 ms while (finalTime - initTime < 100) { // 100 ms
++defaultLoopSize; ++defaultLoopSize;
performUnitComputation(); //perform unit computation performUnitComputation(); //perform unit computation
finalTime = monitor.getProcResourceValues().getCumulativeCpuTime(); finalTime = monitor.getCumulativeCpuTime();
} }
long referenceRuntime = finalTime - initTime; long referenceRuntime = finalTime - initTime;
@ -230,7 +230,7 @@ implements ResourceUsageEmulatorPlugin {
} }
private synchronized long getCurrentCPUUsage() { private synchronized long getCurrentCPUUsage() {
return monitor.getProcResourceValues().getCumulativeCpuTime(); return monitor.getCumulativeCpuTime();
} }
@Override @Override
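
The flattened columns above make the new call path hard to follow; restated in one column, the calibration loop after this change reads as below (names and the 100 ms threshold come from the hunk itself, nothing new is added):

    // Calibrate: count how many unit computations fit into ~100 ms of CPU time,
    // now reading cumulative CPU time directly from the ResourceCalculatorPlugin.
    long initTime = monitor.getCumulativeCpuTime();
    long defaultLoopSize = 0;
    long finalTime = initTime;
    while (finalTime - initTime < 100) {   // 100 ms
      ++defaultLoopSize;
      performUnitComputation();            // one unit of synthetic CPU work
      finalTime = monitor.getCumulativeCpuTime();
    }
    long referenceRuntime = finalTime - initTime;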

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.mapred.gridmix.emulators.resourceusage;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.mapred.gridmix.Progressive; import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;

View File

@ -23,9 +23,9 @@ import java.util.List;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive; import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* <p>This is the driver class for managing all the resource usage emulators. * <p>This is the driver class for managing all the resource usage emulators.

View File

@ -21,8 +21,8 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.Progressive; import org.apache.hadoop.mapred.gridmix.Progressive;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* <p>A {@link ResourceUsageEmulatorPlugin} that emulates the total heap * <p>A {@link ResourceUsageEmulatorPlugin} that emulates the total heap

View File

@ -16,18 +16,17 @@
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.mapred; package org.apache.hadoop.mapred.gridmix;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* Plugin class to test resource information reported by TT. Use * Plugin class to test resource information reported by NM. Use configuration
* configuration items {@link #MAXVMEM_TESTING_PROPERTY} and * items {@link #MAXVMEM_TESTING_PROPERTY} and {@link #MAXPMEM_TESTING_PROPERTY}
* {@link #MAXPMEM_TESTING_PROPERTY} to tell TT the total vmem and the total * to tell NM the total vmem and the total pmem. Use configuration items
* pmem. Use configuration items {@link #NUM_PROCESSORS}, * {@link #NUM_PROCESSORS}, {@link #CPU_FREQUENCY}, {@link #CUMULATIVE_CPU_TIME}
* {@link #CPU_FREQUENCY}, {@link #CUMULATIVE_CPU_TIME} and {@link #CPU_USAGE} * and {@link #CPU_USAGE} to tell TT the CPU information.
* to tell TT the CPU information.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin { public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
@ -48,15 +47,14 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
public static final String CUMULATIVE_CPU_TIME = public static final String CUMULATIVE_CPU_TIME =
"mapred.tasktracker.cumulativecputime.testing"; "mapred.tasktracker.cumulativecputime.testing";
/** CPU usage percentage for testing */ /** CPU usage percentage for testing */
public static final String CPU_USAGE = public static final String CPU_USAGE = "mapred.tasktracker.cpuusage.testing";
"mapred.tasktracker.cpuusage.testing";
/** process cumulative CPU usage time for testing */ /** process cumulative CPU usage time for testing */
public static final String PROC_CUMULATIVE_CPU_TIME = public static final String PROC_CUMULATIVE_CPU_TIME =
"mapred.tasktracker.proccumulativecputime.testing"; "mapred.tasktracker.proccumulativecputime.testing";
/** process pmem for testing*/ /** process pmem for testing */
public static final String PROC_PMEM_TESTING_PROPERTY = public static final String PROC_PMEM_TESTING_PROPERTY =
"mapred.tasktracker.procpmem.testing"; "mapred.tasktracker.procpmem.testing";
/** process vmem for testing*/ /** process vmem for testing */
public static final String PROC_VMEM_TESTING_PROPERTY = public static final String PROC_VMEM_TESTING_PROPERTY =
"mapred.tasktracker.procvmem.testing"; "mapred.tasktracker.procvmem.testing";
@ -107,12 +105,4 @@ public class DummyResourceCalculatorPlugin extends ResourceCalculatorPlugin {
public float getCpuUsage() { public float getCpuUsage() {
return getConf().getFloat(CPU_USAGE, -1); return getConf().getFloat(CPU_USAGE, -1);
} }
@Override
public ProcResourceValues getProcResourceValues() {
long cpuTime = getConf().getLong(PROC_CUMULATIVE_CPU_TIME, -1);
long pMem = getConf().getLong(PROC_PMEM_TESTING_PROPERTY, -1);
long vMem = getConf().getLong(PROC_VMEM_TESTING_PROPERTY, -1);
return new ProcResourceValues(cpuTime, pMem, vMem);
}
} }
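
For orientation, a minimal sketch of driving this dummy plugin from a test purely through configuration; the concrete values are illustrative, and the no-arg constructor plus the setConf inherited from Configured are assumptions based on the surrounding code:

    // Hypothetical usage -- resource numbers come from configuration, not the OS.
    Configuration conf = new Configuration();
    conf.setFloat(DummyResourceCalculatorPlugin.CPU_USAGE, 55.0f);
    conf.setLong(DummyResourceCalculatorPlugin.CUMULATIVE_CPU_TIME, 10000L);

    DummyResourceCalculatorPlugin plugin = new DummyResourceCalculatorPlugin();
    plugin.setConf(conf);

    float cpuUsage = plugin.getCpuUsage();   // 55.0f, read back from CPU_USAGE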

View File

@ -23,7 +23,6 @@ import static org.junit.Assert.*;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.DummyResourceCalculatorPlugin;
import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.gridmix.DebugJobProducer.MockJob; import org.apache.hadoop.mapred.gridmix.DebugJobProducer.MockJob;
import org.apache.hadoop.mapred.gridmix.TestHighRamJob.DummyGridmixJob; import org.apache.hadoop.mapred.gridmix.TestHighRamJob.DummyGridmixJob;
@ -32,8 +31,8 @@ import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.TotalHeapUsageEm
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.TotalHeapUsageEmulatorPlugin.DefaultHeapUsageEmulator; import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.TotalHeapUsageEmulatorPlugin.DefaultHeapUsageEmulator;
import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* Test Gridmix memory emulation. * Test Gridmix memory emulation.

View File

@ -31,14 +31,13 @@ import org.apache.hadoop.mapreduce.TaskInputOutputContext;
import org.apache.hadoop.mapreduce.TaskType; import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig;
import org.apache.hadoop.mapreduce.task.MapContextImpl; import org.apache.hadoop.mapreduce.task.MapContextImpl;
import org.apache.hadoop.mapreduce.util.ResourceCalculatorPlugin;
import org.apache.hadoop.tools.rumen.ResourceUsageMetrics; import org.apache.hadoop.tools.rumen.ResourceUsageMetrics;
import org.apache.hadoop.mapred.DummyResourceCalculatorPlugin;
import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner; import org.apache.hadoop.mapred.gridmix.LoadJob.ResourceUsageMatcherRunner;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin; import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageEmulatorPlugin; import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageEmulatorPlugin;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher; import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.ResourceUsageMatcher;
import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.DefaultCpuUsageEmulator; import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.CumulativeCpuUsageEmulatorPlugin.DefaultCpuUsageEmulator;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
/** /**
* Test Gridmix's resource emulator framework and supported plugins. * Test Gridmix's resource emulator framework and supported plugins.
@ -242,16 +241,6 @@ public class TestResourceUsageEmulators {
public long getCumulativeCpuTime() { public long getCumulativeCpuTime() {
return core.getCpuUsage(); return core.getCpuUsage();
} }
/**
* Returns a {@link ProcResourceValues} with cumulative cpu usage
* computed using {@link #getCumulativeCpuTime()}.
*/
@Override
public ProcResourceValues getProcResourceValues() {
long usageValue = getCumulativeCpuTime();
return new ProcResourceValues(usageValue, -1, -1);
}
} }
/** /**

View File

@ -68,6 +68,7 @@ public class TestStreamReduceNone
"-reducer", "org.apache.hadoop.mapred.lib.IdentityReducer", "-reducer", "org.apache.hadoop.mapred.lib.IdentityReducer",
"-numReduceTasks", "0", "-numReduceTasks", "0",
"-jobconf", "mapreduce.task.files.preserve.failedtasks=true", "-jobconf", "mapreduce.task.files.preserve.failedtasks=true",
"-jobconf", "mapreduce.job.maps=1",
"-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp") "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
}; };
} }

View File

@ -54,6 +54,8 @@ public class TestStreamXmlRecordReader extends TestStreaming {
protected String[] genArgs() { protected String[] genArgs() {
args.add("-inputreader"); args.add("-inputreader");
args.add("StreamXmlRecordReader,begin=<xmltag>,end=</xmltag>"); args.add("StreamXmlRecordReader,begin=<xmltag>,end=</xmltag>");
args.add("-jobconf");
args.add("mapreduce.job.maps=1");
return super.genArgs(); return super.genArgs();
} }
} }

View File

@ -62,6 +62,10 @@ Release 2.0.5-beta - UNRELEASED
YARN-396. Rationalize AllocateResponse in RM Scheduler API. (Zhijie Shen YARN-396. Rationalize AllocateResponse in RM Scheduler API. (Zhijie Shen
via hitesh) via hitesh)
YARN-439. Flatten NodeHeartbeatResponse. (Xuan Gong via sseth)
YARN-440. Flatten RegisterNodeManagerResponse. (Xuan Gong via sseth)
NEW FEATURES NEW FEATURES
IMPROVEMENTS IMPROVEMENTS
@ -91,6 +95,14 @@ Release 2.0.5-beta - UNRELEASED
YARN-417. Create AMRMClient wrapper that provides asynchronous callbacks. YARN-417. Create AMRMClient wrapper that provides asynchronous callbacks.
(Sandy Ryza via bikas) (Sandy Ryza via bikas)
YARN-497. Yarn unmanaged-am launcher jar does not define a main class in
its manifest (Hitesh Shah via bikas)
YARN-469. Make scheduling mode in FS pluggable. (kkambatl via tucu)
YARN-450. Define value for * in the scheduling protocol (Zhijie Shen via
bikas)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -125,6 +137,30 @@ Release 2.0.5-beta - UNRELEASED
YARN-470. Support a way to disable resource monitoring on the NodeManager. YARN-470. Support a way to disable resource monitoring on the NodeManager.
(Siddharth Seth via hitesh) (Siddharth Seth via hitesh)
YARN-71. Fix the NodeManager to clean up local-dirs on restart.
(Xuan Gong via sseth)
YARN-378. Fix RM to make the AM max attempts/retries to be configurable
per application by clients. (Zhijie Shen via vinodkv)
YARN-498. Unmanaged AM launcher does not set various constants in env for
an AM, also does not handle failed AMs properly. (Hitesh Shah via bikas)
YARN-474. Fix CapacityScheduler to trigger application-activation when
am-resource-percent configuration is refreshed. (Zhijie Shen via vinodkv)
YARN-496. Fair scheduler configs are refreshed inconsistently in
reinitialize. (Sandy Ryza via tomwhite)
YARN-209. Fix CapacityScheduler to trigger application-activation when
the cluster capacity changes. (Zhijie Shen via vinodkv)
YARN-24. Nodemanager fails to start if log aggregation enabled and
namenode unavailable. (sandyr via tucu)
YARN-515. Node Manager not getting the master key. (Robert Joseph Evans
via jlowe)
Release 2.0.4-alpha - UNRELEASED Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES
@ -463,6 +499,12 @@ Release 0.23.7 - UNRELEASED
YARN-345. Many InvalidStateTransitonException errors for ApplicationImpl YARN-345. Many InvalidStateTransitonException errors for ApplicationImpl
in Node Manager (Robert Parker via jlowe) in Node Manager (Robert Parker via jlowe)
YARN-109. .tmp file is not deleted for localized archives (Mayank Bansal
via bobby)
YARN-460. CS user left in list of active users for the queue even when
application finished (tgraves)
Release 0.23.6 - UNRELEASED Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -92,6 +92,12 @@ public interface ApplicationConstants {
public static final String STDOUT = "stdout"; public static final String STDOUT = "stdout";
/**
* The environment variable for MAX_APP_ATTEMPTS. Set in AppMaster environment
* only
*/
public static final String MAX_APP_ATTEMPTS_ENV = "MAX_APP_ATTEMPTS";
/** /**
* Environment for Applications. * Environment for Applications.
* *
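
For illustration, an ApplicationMaster could read the new variable as follows; the fallback to a single attempt when the variable is absent is an assumption, not part of the patch:

    // MAX_APP_ATTEMPTS_ENV is exported only into the AppMaster's environment.
    String maxAttemptsStr =
        System.getenv(ApplicationConstants.MAX_APP_ATTEMPTS_ENV);
    int maxAppAttempts =
        (maxAttemptsStr == null) ? 1 : Integer.parseInt(maxAttemptsStr);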

View File

@ -189,4 +189,22 @@ public interface ApplicationSubmissionContext {
@LimitedPrivate("mapreduce") @LimitedPrivate("mapreduce")
@Unstable @Unstable
public void setCancelTokensWhenComplete(boolean cancel); public void setCancelTokensWhenComplete(boolean cancel);
/**
* @return the number of max attempts of the application to be submitted
*/
@Public
@Unstable
public int getMaxAppAttempts();
/**
* Set the number of max attempts of the application to be submitted. WARNING:
* it should be no larger than the global number of max attempts in the Yarn
* configuration.
* @param maxAppAttempts the number of max attempts of the application
* to be submitted.
*/
@Public
@Unstable
public void setMaxAppAttempts(int maxAppAttempts);
} }

View File

@ -49,6 +49,26 @@ import org.apache.hadoop.yarn.api.AMRMProtocol;
@Public @Public
@Stable @Stable
public abstract class ResourceRequest implements Comparable<ResourceRequest> { public abstract class ResourceRequest implements Comparable<ResourceRequest> {
/**
* The constant string representing no locality.
* It should be used by all references that want to pass an arbitrary host
* name in.
*/
public static final String ANY = "*";
/**
* Check whether the given <em>host/rack</em> string represents an arbitrary
* host name.
*
* @param hostName <em>host/rack</em> on which the allocation is desired
* @return whether the given <em>host/rack</em> string represents an arbitrary
* host name
*/
public static boolean isAnyLocation(String hostName) {
return ANY.equals(hostName);
}
/** /**
* Get the <code>Priority</code> of the request. * Get the <code>Priority</code> of the request.
* @return <code>Priority</code> of the request * @return <code>Priority</code> of the request
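
A brief sketch of the new constant in use; Records.newRecord and the hostName accessors are assumed from the ResourceRequest API of this branch and are not part of the hunk shown:

    // Request containers anywhere in the cluster rather than on a specific
    // host or rack, then test a request for locality.
    ResourceRequest request = Records.newRecord(ResourceRequest.class);
    request.setHostName(ResourceRequest.ANY);   // "*": no locality constraint

    if (ResourceRequest.isAnyLocation(request.getHostName())) {
      // handle as an off-switch / any-host request
    }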

Some files were not shown because too many files have changed in this diff.